// File: rapidsai_public_repos/wholegraph/cpp/tests/parallel_utils_tests.cpp
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_runtime_api.h>
#include <sys/mman.h>
#include <atomic>
#include "parallel_utils.hpp"
TEST(ParallelUtilsTest, MultiThreadRun)
{
std::atomic<int> thread_count = 0;
std::atomic<int> thread_mask = 0;
MultiThreadRun(8, [&thread_count, &thread_mask](int rank, int size) {
thread_count.fetch_add(1);
thread_mask.fetch_or(1 << rank);
EXPECT_EQ(size, 8);
});
EXPECT_EQ(thread_count.load(), 8);
EXPECT_EQ(thread_mask.load(), (1 << 8) - 1);
}
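// MultiProcessRun forks separate processes, so the counters live in shared
// (MAP_SHARED | MAP_ANONYMOUS) memory rather than process-local atomics, and
// gtest failures must be propagated to the parent through the exit code.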
TEST(ParallelUtilsTest, MultiProcessRun)
{
int* shared_info = static_cast<int*>(
mmap(nullptr, sizeof(int) * 2, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0));
int* thread_count = shared_info;
int* thread_mask = shared_info + 1;
*thread_count = 0;
*thread_mask = 0;
MultiProcessRun(8, [thread_count, thread_mask](int rank, int world_size) {
reinterpret_cast<std::atomic<int>*>(thread_count)->fetch_add(1);
reinterpret_cast<std::atomic<int>*>(thread_mask)->fetch_or(1 << rank);
EXPECT_EQ(world_size, 8);
// Need to manually check for gtest failures and set the exit code accordingly;
// WHOLEMEMORY_CHECK can help do this.
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
EXPECT_EQ(*thread_count, 8);
EXPECT_EQ(*thread_mask, (1 << 8) - 1);
}
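// Broadcast from every possible root in turn; after PipeBroadcast each rank
// should observe the root's value.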
TEST(ParallelUtilsTest, PipeBroadcast)
{
const int nproc = 8;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, nproc);
for (int root = 0; root < nproc; root++) {
MultiProcessRun(nproc, [root, &pipes](int rank, int world_size) {
int data = rank * 10;
PipeBroadcast(rank, world_size, root, pipes, &data);
EXPECT_EQ(data, root * 10);
// Need to manually check for gtest failures and set the exit code accordingly;
// WHOLEMEMORY_CHECK can help do this.
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
ClosePipes(&pipes);
}
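// Same idea, but broadcast within groups of 4 ranks: each rank should end up
// with the value of the root inside its own group.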
TEST(ParallelUtilsTest, GroupPipeBroadcast)
{
const int nproc = 8;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, nproc);
const int group_size = 4;
for (int group_root = 0; group_root < group_size; group_root++) {
MultiProcessRun(nproc, [group_root, &pipes](int rank, int world_size) {
int data = rank * 10;
PipeGroupBroadcast(rank, world_size, group_root, group_size, pipes, &data);
int rank_group_root = group_root + rank / group_size * group_size;
EXPECT_EQ(data, rank_group_root * 10);
// Need to manually check for gtest failures and set the exit code accordingly;
// WHOLEMEMORY_CHECK can help do this.
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
ClosePipes(&pipes);
}
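// The device count reported by ForkGetDeviceCount should match what
// cudaGetDeviceCount() reports in this process.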
TEST(ParallelUtilsTest, ForkGetDeviceCount)
{
int dev_count_fork = ForkGetDeviceCount();
int dev_count_cuda;
EXPECT_EQ(cudaGetDeviceCount(&dev_count_cuda), cudaSuccess);
EXPECT_EQ(dev_count_cuda, dev_count_fork);
}

// File: rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/wholememory_scatter_tests.cu
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_op.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "../wholememory/wholememory_test_utils.hpp"
#include "embedding_test_utils.hpp"
typedef struct WholeMemoryScatterTestParam {
wholememory_matrix_description_t get_embedding_desc() const
{
int64_t matrix_sizes[2] = {embedding_entry_count, embedding_dim};
return wholememory_create_matrix_desc(
matrix_sizes, embedding_stride, embedding_storage_offset, embedding_type);
}
wholememory_array_description_t get_indices_desc() const
{
return wholememory_create_array_desc(indices_count, indices_storage_offset, indices_type);
}
wholememory_matrix_description_t get_input_desc() const
{
int64_t input_sizes[2] = {indices_count, embedding_dim};
return wholememory_create_matrix_desc(
input_sizes, input_stride, input_storage_offset, input_type);
}
int64_t get_embedding_granularity() const
{
return embedding_stride * wholememory_dtype_get_element_size(embedding_type);
}
WholeMemoryScatterTestParam& set_memory_type(wholememory_memory_type_t new_memory_type)
{
memory_type = new_memory_type;
return *this;
}
WholeMemoryScatterTestParam& set_memory_location(
wholememory_memory_location_t new_memory_location)
{
memory_location = new_memory_location;
return *this;
}
WholeMemoryScatterTestParam& set_entry_count(int64_t entry_count)
{
embedding_entry_count = entry_count;
return *this;
}
WholeMemoryScatterTestParam& set_embedding_dim(int64_t new_embedding_dim)
{
embedding_dim = new_embedding_dim;
if (embedding_stride < embedding_dim) embedding_stride = embedding_dim;
if (input_stride < embedding_dim) input_stride = embedding_dim;
return *this;
}
WholeMemoryScatterTestParam& set_embedding_stride(int64_t new_embedding_stride)
{
embedding_stride = new_embedding_stride;
return *this;
}
WholeMemoryScatterTestParam& set_input_stride(int64_t new_input_stride)
{
input_stride = new_input_stride;
return *this;
}
WholeMemoryScatterTestParam& set_indices_count(int64_t new_indices_count)
{
indices_count = new_indices_count;
return *this;
}
WholeMemoryScatterTestParam& set_embedding_type(wholememory_dtype_t new_embedding_type)
{
embedding_type = new_embedding_type;
return *this;
}
WholeMemoryScatterTestParam& set_indices_type(wholememory_dtype_t new_indices_type)
{
indices_type = new_indices_type;
return *this;
}
WholeMemoryScatterTestParam& set_input_type(wholememory_dtype_t new_input_type)
{
input_type = new_input_type;
return *this;
}
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_DEVICE;
int64_t embedding_entry_count = 1000000LL;
int64_t embedding_dim = 32;
int64_t embedding_stride = 32;
int64_t indices_count = 100000;
int64_t input_stride = 32;
wholememory_dtype_t embedding_type = WHOLEMEMORY_DT_FLOAT;
wholememory_dtype_t indices_type = WHOLEMEMORY_DT_INT;
wholememory_dtype_t input_type = WHOLEMEMORY_DT_FLOAT;
int64_t embedding_storage_offset = 0;
int64_t indices_storage_offset = 0;
int64_t input_storage_offset = 0;
} WholeMemoryScatterTestParam;
class WholeMemoryScatterParameterTests
: public ::testing::TestWithParam<WholeMemoryScatterTestParam> {};
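// One process per GPU: each rank generates reference rows on the device,
// scatters them into the shared WholeMemory embedding, gathers them back with
// the same indices, and compares the gathered and input buffers bit-for-bit
// on the host.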
TEST_P(WholeMemoryScatterParameterTests, ScatterTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(dev_count, [&params, &pipes](int world_rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
if (world_rank == 0) GTEST_SKIP_("Skip due to not supported.");
return;
}
wholememory_handle_t embedding_handle;
auto embedding_desc = params.get_embedding_desc();
auto indices_desc = params.get_indices_desc();
auto input_desc = params.get_input_desc();
size_t embedding_entry_size = params.get_embedding_granularity();
EXPECT_EQ(wholememory_malloc(&embedding_handle,
wholememory_get_memory_size_from_matrix(&embedding_desc),
wm_comm,
params.memory_type,
params.memory_location,
embedding_entry_size),
WHOLEMEMORY_SUCCESS);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
void *host_indices = nullptr, *dev_indices = nullptr, *dev_input_buffer = nullptr,
*dev_gather_buffer = nullptr;
void *host_gather_buffer = nullptr, *host_input_buffer = nullptr;
size_t scatter_buffer_size = wholememory_get_memory_size_from_matrix(&input_desc);
size_t indices_buffer_size = wholememory_get_memory_size_from_array(&indices_desc);
EXPECT_EQ(cudaMallocHost(&host_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_input_buffer, scatter_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_gather_buffer, scatter_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_input_buffer, scatter_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_gather_buffer, scatter_buffer_size), cudaSuccess);
wholememory_ops::testing::host_random_init_indices(
host_indices, indices_desc, embedding_desc.sizes[0]);
EXPECT_EQ(cudaMemcpyAsync(dev_indices,
host_indices,
wholememory_get_memory_size_from_array(&indices_desc),
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
wholememory_ops::testing::device_get_expected_embedding(dev_input_buffer,
input_desc,
embedding_desc.dtype,
dev_indices,
indices_desc,
wholememory::get_default_env_func(),
stream);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
wholememory_tensor_t embedding_tensor;
wholememory_tensor_description_t embedding_tensor_desc;
wholememory_copy_matrix_desc_to_tensor(&embedding_tensor_desc, &embedding_desc);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&embedding_tensor, embedding_handle, &embedding_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t indices_tensor, input_tensor;
wholememory_tensor_description_t indices_tensor_desc, input_tensor_desc;
wholememory_copy_array_desc_to_tensor(&indices_tensor_desc, &indices_desc);
wholememory_copy_matrix_desc_to_tensor(&input_tensor_desc, &input_desc);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&indices_tensor, dev_indices, &indices_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&input_tensor, dev_input_buffer, &input_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_scatter(input_tensor,
indices_tensor,
embedding_tensor,
wholememory::get_default_env_func(),
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(wholememory_destroy_tensor(input_tensor), WHOLEMEMORY_SUCCESS);
wholememory_communicator_barrier(wm_comm);
wholememory_tensor_t gathered_tensor;
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&gathered_tensor, dev_gather_buffer, &input_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_gather(embedding_tensor,
indices_tensor,
gathered_tensor,
wholememory::get_default_env_func(),
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaMemcpyAsync(host_gather_buffer,
dev_gather_buffer,
wholememory_get_memory_size_from_matrix(&input_desc),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_input_buffer,
dev_input_buffer,
wholememory_get_memory_size_from_matrix(&input_desc),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(wholememory_destroy_tensor(gathered_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(indices_tensor), WHOLEMEMORY_SUCCESS);
wholememory_ops::testing::host_check_embedding_same(
host_gather_buffer, input_desc, host_input_buffer, input_desc);
EXPECT_EQ(cudaFreeHost(host_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_input_buffer), cudaSuccess);
EXPECT_EQ(cudaFree(dev_gather_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_input_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_gather_buffer), cudaSuccess);
EXPECT_EQ(wholememory_destroy_tensor(embedding_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_free(embedding_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
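// Sweep memory types (continuous / chunked / distributed), host vs. device
// placement, several embedding dims and strides, half-precision embedding or
// input types, int64 indices, and the empty-indices edge case.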
INSTANTIATE_TEST_SUITE_P(
WholeMemoryScatterOpTests,
WholeMemoryScatterParameterTests,
::testing::Values(
#if 1
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_indices_count(0),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_indices_count(0),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_indices_count(0),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(128),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(128),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(128),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_dim(11)
.set_indices_count(100005),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_dim(11)
.set_indices_count(100005),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(11)
.set_indices_count(100005),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(127),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(127),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(127),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(129),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(129),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(129),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(513),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(513),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(513),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_input_type(WHOLEMEMORY_DT_HALF),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_stride(33),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_stride(33),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_stride(33),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_input_stride(33),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_input_stride(33),
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_input_stride(33),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
WholeMemoryScatterTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
#endif
WholeMemoryScatterTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)));

// File: rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/embedding_test_utils.cu
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "embedding_test_utils.hpp"
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <experimental/random>
#include <wholememory_ops/register.hpp>
#include <wholememory_ops/temp_memory_handle.hpp>
namespace wholememory_ops {
namespace testing {
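// type_convertor maps each element type to the type used for device-side
// load/store arithmetic; __half and __nv_bfloat16 are routed through float.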
template <typename DataTypeT>
class type_convertor {
public:
using LoadTypeT = DataTypeT;
using StoreTypeT = DataTypeT;
static __device__ LoadTypeT convert_load_data(DataTypeT data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ DataTypeT convert_store_data(StoreTypeT data)
{
return static_cast<DataTypeT>(data);
}
};
template <>
class type_convertor<__half> {
public:
using LoadTypeT = float;
using StoreTypeT = float;
static __device__ LoadTypeT convert_load_data(__half data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ __half convert_store_data(StoreTypeT data) { return static_cast<__half>(data); }
};
template <>
class type_convertor<__nv_bfloat16> {
public:
using LoadTypeT = float;
using StoreTypeT = float;
static __device__ LoadTypeT convert_load_data(__nv_bfloat16 data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ __nv_bfloat16 convert_store_data(StoreTypeT data)
{
return static_cast<__nv_bfloat16>(data);
}
};
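// Element-wise cast of a strided [row_count, col_count] matrix: each block
// covers blockDim.x / col_count rows, one thread per element.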
template <typename SrcTypeT, typename DstTypeT>
__global__ void matrix_type_cast_kernel(DstTypeT* dst,
const SrcTypeT* src,
int64_t row_count,
int64_t col_count,
int dst_stride,
int src_stride)
{
int row_count_per_block = blockDim.x / col_count;
int row_idx_in_block = threadIdx.x / col_count;
int col_idx = threadIdx.x - row_idx_in_block * col_count;
int64_t row_idx = static_cast<int64_t>(blockIdx.x) * row_count_per_block + row_idx_in_block;
if (row_idx_in_block >= row_count_per_block || row_idx >= row_count) return;
auto src_data = src[row_idx * src_stride + col_idx];
auto loaded_src_data = type_convertor<SrcTypeT>::convert_load_data(src_data);
auto store_dst_data = static_cast<typename type_convertor<DstTypeT>::StoreTypeT>(loaded_src_data);
auto dst_data = type_convertor<DstTypeT>::convert_store_data(store_dst_data);
dst[row_idx * dst_stride + col_idx] = dst_data;
}
template <typename SrcTypeT, typename DstTypeT>
void matrix_test_cast(void* dst,
const void* src,
int64_t row_count,
int64_t col_count,
int dst_stride,
int src_stride,
cudaStream_t stream)
{
int threads_per_block = std::max<int>(col_count, 256);
int rows_per_block = threads_per_block / col_count;
threads_per_block = rows_per_block * col_count;
int block_count = static_cast<int>((row_count + rows_per_block - 1) / rows_per_block);
matrix_type_cast_kernel<SrcTypeT, DstTypeT>
<<<block_count, threads_per_block, 0, stream>>>(static_cast<DstTypeT*>(dst),
static_cast<const SrcTypeT*>(src),
row_count,
col_count,
dst_stride,
src_stride);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
}
REGISTER_DISPATCH_TWO_TYPES(FloatMatrixTestCast,
matrix_test_cast,
HALF_FLOAT_DOUBLE,
HALF_FLOAT_DOUBLE)
REGISTER_DISPATCH_TWO_TYPES(IntMatrixTestCast, matrix_test_cast, ALLSINT, ALLSINT)
void device_matrix_type_cast(void* dst,
wholememory_matrix_description_t dst_desc,
const void* src,
wholememory_matrix_description_t src_desc,
cudaStream_t stream)
{
EXPECT_EQ(dst_desc.sizes[0], src_desc.sizes[0]);
EXPECT_EQ(dst_desc.sizes[1], src_desc.sizes[1]);
bool is_float_src =
src_desc.dtype == WHOLEMEMORY_DT_HALF || src_desc.dtype == WHOLEMEMORY_DT_FLOAT ||
src_desc.dtype == WHOLEMEMORY_DT_DOUBLE || src_desc.dtype == WHOLEMEMORY_DT_BF16;
bool is_float_dst =
dst_desc.dtype == WHOLEMEMORY_DT_HALF || dst_desc.dtype == WHOLEMEMORY_DT_FLOAT ||
dst_desc.dtype == WHOLEMEMORY_DT_DOUBLE || dst_desc.dtype == WHOLEMEMORY_DT_BF16;
EXPECT_EQ(is_float_dst, is_float_src);
if (is_float_src) {
DISPATCH_TWO_TYPES(src_desc.dtype,
dst_desc.dtype,
FloatMatrixTestCast,
dst,
src,
src_desc.sizes[0],
src_desc.sizes[1],
dst_desc.stride,
src_desc.stride,
stream);
} else {
DISPATCH_TWO_TYPES(src_desc.dtype,
dst_desc.dtype,
IntMatrixTestCast,
dst,
src,
src_desc.sizes[0],
src_desc.sizes[1],
dst_desc.stride,
src_desc.stride,
stream);
}
}
void device_array_type_cast(void* dst,
wholememory_array_description_t dst_desc,
const void* src,
wholememory_array_description_t src_desc,
cudaStream_t stream)
{
wholememory_tensor_description_t dst_tensor_desc, src_tensor_desc;
wholememory_copy_array_desc_to_tensor(&dst_tensor_desc, &dst_desc);
wholememory_copy_array_desc_to_tensor(&src_tensor_desc, &src_desc);
wholememory_matrix_description_t dst_matrix_desc, src_matrix_desc;
EXPECT_TRUE(wholememory_convert_tensor_desc_to_matrix(&dst_matrix_desc, &dst_tensor_desc));
EXPECT_TRUE(wholememory_convert_tensor_desc_to_matrix(&src_matrix_desc, &src_tensor_desc));
device_matrix_type_cast(dst, dst_matrix_desc, src, src_matrix_desc, stream);
}
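// Deterministic test data: an int64 value derived from the embedding index is
// reduced to the mantissa width of the target floating-point type, so the
// conversion is exact and results can be compared bit-for-bit.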
template <typename DataTypeT>
__device__ __forceinline__ DataTypeT device_get_data_from_int64(int64_t data)
{
return static_cast<DataTypeT>(data);
}
template <typename DataTypeT>
__device__ __forceinline__ DataTypeT device_cast_int64_t_to_float(int64_t data)
{
return static_cast<DataTypeT>(data);
}
template <>
__device__ __forceinline__ __half device_cast_int64_t_to_float(int64_t data)
{
return static_cast<__half>(static_cast<float>(data));
}
template <>
__device__ __forceinline__ __nv_bfloat16 device_cast_int64_t_to_float(int64_t data)
{
return static_cast<__nv_bfloat16>(static_cast<float>(data));
}
template <int M, int E, typename DataTypeT>
__device__ __forceinline__ DataTypeT device_get_float_data_from_int64(int64_t data)
{
static_assert(M > 0, "M should be larger than 0.");
static_assert(E > 0, "E should be larger than 0.");
static_assert(M + E + 1 == sizeof(DataTypeT) * 8, "M + E should be sizeof(DataTypeT) * 8 - 1");
int64_t mdata = data & ((1LL << (M + 1)) - 1LL);
auto data_float = device_cast_int64_t_to_float<DataTypeT>(mdata);
return data_float;
}
template <>
__device__ __forceinline__ float device_get_data_from_int64<float>(int64_t data)
{
return device_get_float_data_from_int64<23, 8, float>(data);
}
template <>
__device__ __forceinline__ double device_get_data_from_int64<double>(int64_t data)
{
return device_get_float_data_from_int64<52, 11, double>(data);
}
template <>
__device__ __forceinline__ __half device_get_data_from_int64<__half>(int64_t data)
{
return device_get_float_data_from_int64<10, 5, __half>(data);
}
template <>
__device__ __forceinline__ __nv_bfloat16 device_get_data_from_int64<__nv_bfloat16>(int64_t data)
{
return device_get_float_data_from_int64<7, 8, __nv_bfloat16>(data);
}
template <typename DataTypeT>
__device__ __forceinline__ DataTypeT device_get_embedding_data(int64_t embedding_idx,
int embedding_dim,
int dim_idx)
{
int64_t embedding_data = embedding_idx * embedding_dim + dim_idx;
embedding_data = embedding_data * 97 + 1007;
embedding_data = embedding_idx;
return device_get_data_from_int64<DataTypeT>(embedding_data);
}
template <typename DataTypeT>
__global__ void get_embedding_data_kernel(DataTypeT* embedding_ptr,
int64_t storage_offset,
int embedding_dim,
int embedding_stride,
int64_t local_entry_start,
int64_t local_entry_count)
{
int64_t local_embedding_idx = blockIdx.x;
if (local_embedding_idx >= local_entry_count) return;
int thread_x = threadIdx.x;
embedding_ptr += storage_offset;
int64_t embedding_idx = local_entry_start + local_embedding_idx;
embedding_ptr += embedding_stride * local_embedding_idx;
for (; thread_x < embedding_dim; thread_x += blockDim.x) {
auto data = device_get_embedding_data<DataTypeT>(embedding_idx, embedding_dim, thread_x);
embedding_ptr[thread_x] = data;
}
}
template <typename DataTypeT>
void get_embedding_data(void* embedding_ptr,
wholememory_matrix_description_t embedding_desc,
int64_t local_entry_start,
int64_t local_entry_count,
cudaStream_t stream)
{
int64_t storage_offset = embedding_desc.storage_offset;
int embedding_dim = embedding_desc.sizes[1];
int embedding_stride = embedding_desc.stride;
int block_size = embedding_dim;
block_size = std::min(block_size, 256);
int block_count = local_entry_count;
get_embedding_data_kernel<DataTypeT>
<<<block_count, block_size, 0, stream>>>(static_cast<DataTypeT*>(embedding_ptr),
storage_offset,
embedding_dim,
embedding_stride,
local_entry_start,
local_entry_count);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
}
void device_random_init_local_embedding_table(wholememory_handle_t embedding_handle,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
void* local_embedding_ptr;
size_t local_embedding_size, local_embedding_offset;
EXPECT_EQ(
wholememory_get_local_memory(
&local_embedding_ptr, &local_embedding_size, &local_embedding_offset, embedding_handle),
WHOLEMEMORY_SUCCESS);
int64_t embedding_entry_size =
embedding_desc.stride * wholememory_dtype_get_element_size(embedding_desc.dtype);
EXPECT_EQ(local_embedding_size % embedding_entry_size, 0);
EXPECT_EQ(local_embedding_offset % embedding_entry_size, 0);
int64_t local_entry_start = local_embedding_offset / embedding_entry_size;
int64_t local_entry_count = local_embedding_size / embedding_entry_size;
if (local_entry_count == 0) return;
switch (embedding_desc.dtype) {
case WHOLEMEMORY_DT_FLOAT: {
get_embedding_data<float>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_HALF: {
get_embedding_data<__half>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_DOUBLE: {
get_embedding_data<double>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_BF16: {
get_embedding_data<__nv_bfloat16>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_INT: {
get_embedding_data<int>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_INT64: {
get_embedding_data<int64_t>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_INT16: {
get_embedding_data<int16_t>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
case WHOLEMEMORY_DT_INT8: {
get_embedding_data<int8_t>(
local_embedding_ptr, embedding_desc, local_entry_start, local_entry_count, stream);
break;
}
default: {
FAIL();
break;
}
}
}
template <typename IndexT, typename GenTypeT>
__global__ void device_get_expected_embedding_kernel(GenTypeT* gen_buffer,
int64_t storage_offset,
int embedding_dim,
int embedding_stride,
const IndexT* indices,
int indice_count)
{
int64_t block_idx = blockIdx.x;
if (block_idx >= indice_count) return;
int thread_x = threadIdx.x;
gen_buffer += storage_offset;
int64_t embedding_idx = indices[block_idx];
gen_buffer += embedding_stride * block_idx;
for (; thread_x < embedding_dim; thread_x += blockDim.x) {
auto data = device_get_embedding_data<GenTypeT>(embedding_idx, embedding_dim, thread_x);
gen_buffer[thread_x] = data;
}
}
template <typename IndexT, typename GenTypeT>
void device_get_expected_embedding_temp_func(void* gen_buffer,
wholememory_matrix_description_t gen_buffer_desc,
void* indices,
wholememory_array_description_t indices_desc,
cudaStream_t stream)
{
EXPECT_EQ(gen_buffer_desc.sizes[0], indices_desc.size);
if (indices_desc.size == 0) return;
int block_count = gen_buffer_desc.sizes[0];
int thread_count = std::min<int>(gen_buffer_desc.sizes[1], 256);
device_get_expected_embedding_kernel<IndexT, GenTypeT>
<<<block_count, thread_count, 0, stream>>>(static_cast<GenTypeT*>(gen_buffer),
gen_buffer_desc.storage_offset,
gen_buffer_desc.sizes[1],
gen_buffer_desc.stride,
static_cast<const IndexT*>(indices),
indices_desc.size);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
}
REGISTER_DISPATCH_TWO_TYPES(DeviceGetExpectedEmbedding,
device_get_expected_embedding_temp_func,
SINT3264,
ALLSINT_ALLFLOAT)
void device_get_expected_embedding(void* output,
wholememory_matrix_description_t output_desc,
wholememory_dtype_t embedding_dtype,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
void* gen_buffer = output;
wholememory_ops::temp_memory_handle gen_buffer_tmh(p_env_fns);
auto gen_desc = output_desc;
if (embedding_dtype != output_desc.dtype) {
gen_desc.dtype = embedding_dtype;
gen_desc.stride = gen_desc.sizes[1];
gen_desc.storage_offset = 0;
gen_buffer = gen_buffer_tmh.device_malloc(
wholememory_get_memory_element_count_from_matrix(&gen_desc), gen_desc.dtype);
}
DISPATCH_TWO_TYPES(indices_desc.dtype,
gen_desc.dtype,
DeviceGetExpectedEmbedding,
gen_buffer,
gen_desc,
indices,
indices_desc,
stream);
if (embedding_dtype != output_desc.dtype) {
device_matrix_type_cast(output, output_desc, gen_buffer, gen_desc, stream);
}
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
}
template <typename IndexT>
void host_get_random_indices(void* indices,
wholememory_array_description_t indice_desc,
int64_t max_indices)
{
IndexT* indices_ptr = static_cast<IndexT*>(indices);
std::experimental::reseed();
for (int64_t i = 0; i < indice_desc.size; i++) {
IndexT random_index = std::experimental::randint<IndexT>(0, max_indices - 1);
indices_ptr[i + indice_desc.storage_offset] = random_index;
}
}
void host_random_init_indices(void* indices,
wholememory_array_description_t indices_desc,
int64_t max_indices)
{
EXPECT_TRUE(indices_desc.dtype == WHOLEMEMORY_DT_INT ||
indices_desc.dtype == WHOLEMEMORY_DT_INT64);
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_random_indices<int>(indices, indices_desc, max_indices);
} else {
host_get_random_indices<int64_t>(indices, indices_desc, max_indices);
}
}
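// Comparison helpers: elements are reloaded as raw unsigned integers of the
// element size so that embeddings are compared bit-for-bit regardless of dtype.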
template <typename DataTypeT>
uint64_t load_hex_data(void* ptr, size_t offset)
{
DataTypeT* data_ptr = static_cast<DataTypeT*>(ptr) + offset;
uint64_t data = *data_ptr;
return data;
}
void host_check_embedding_same(void* host_embedding,
wholememory_matrix_description_t embedding_desc,
void* host_reference,
wholememory_matrix_description_t reference_desc)
{
EXPECT_EQ(embedding_desc.dtype, reference_desc.dtype);
EXPECT_EQ(embedding_desc.sizes[0], reference_desc.sizes[0]);
EXPECT_EQ(embedding_desc.sizes[1], reference_desc.sizes[1]);
int64_t row_count = embedding_desc.sizes[0];
int64_t col_count = embedding_desc.sizes[1];
size_t element_size = wholememory_dtype_get_element_size(embedding_desc.dtype);
int64_t diff_count = 0;
for (int64_t row = 0; row < row_count; row++) {
for (int64_t col = 0; col < col_count; col++) {
uint64_t embedding_data, reference_data;
if (element_size == 1) {
embedding_data = load_hex_data<uint8_t>(
host_embedding, row * embedding_desc.stride + embedding_desc.storage_offset + col);
reference_data = load_hex_data<uint8_t>(
host_reference, row * reference_desc.stride + reference_desc.storage_offset + col);
} else if (element_size == 2) {
embedding_data = load_hex_data<uint16_t>(
host_embedding, row * embedding_desc.stride + embedding_desc.storage_offset + col);
reference_data = load_hex_data<uint16_t>(
host_reference, row * reference_desc.stride + reference_desc.storage_offset + col);
} else if (element_size == 4) {
embedding_data = load_hex_data<uint32_t>(
host_embedding, row * embedding_desc.stride + embedding_desc.storage_offset + col);
reference_data = load_hex_data<uint32_t>(
host_reference, row * reference_desc.stride + reference_desc.storage_offset + col);
} else {
embedding_data = load_hex_data<uint64_t>(
host_embedding, row * embedding_desc.stride + embedding_desc.storage_offset + col);
reference_data = load_hex_data<uint64_t>(
host_reference, row * reference_desc.stride + reference_desc.storage_offset + col);
}
if (embedding_data != reference_data) {
if (diff_count < 10) {
printf("row=%ld, col=%ld, got %lx (float %f), but should be %lx (float %f)\n",
row,
col,
embedding_data,
*(float*)(&embedding_data),
reference_data,
*(float*)(&reference_data));
fflush(stdout);
EXPECT_EQ(embedding_data, reference_data);
}
diff_count++;
}
}
}
EXPECT_EQ(diff_count, 0);
}
void host_random_init_float(float* data, int64_t len, float max_value, float min_value)
{
static std::default_random_engine e;
static std::uniform_real_distribution<> dis(-1.0, 1.0);  // range [-1.0, 1.0]
for (int64_t i = 0; i < len; i++) {
data[i] = dis(e);
}
}
} // namespace testing
} // namespace wholememory_ops

// File: rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/cacheset_tests.cu
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <experimental/random>
#include <iostream>
#include <random>
#include "wholememory_ops/functions/embedding_cache_func.cuh"
template <typename DataT>
static void PrintVector(const std::vector<DataT>& v,
const std::string& name,
int padding = 0,
int value_per_line = 8)
{
std::cout << std::string(padding, ' ') << "vector " << name << " length=" << v.size() << " :";
for (size_t i = 0; i < v.size(); i++) {
if (i % value_per_line == 0) std::cout << std::endl << std::string(padding + 2, ' ');
std::cout << v[i] << ",\t";
}
std::cout << std::endl;
}
static void PrintTagVector(const std::vector<uint16_t>& v,
const std::string& name,
int padding = 0,
int value_per_line = 8)
{
std::cout << std::string(padding, ' ') << "vector " << name << " length=" << v.size() << " :";
for (size_t i = 0; i < v.size(); i++) {
if (i % value_per_line == 0) std::cout << std::endl << std::string(padding + 2, ' ');
uint16_t tag = v[i];
bool valid = tag & (1U << 14);
bool modified = tag & (1U << 15);
int lid = valid ? (int)(tag & ((1U << 14U) - 1)) : -1;
std::ostringstream oss;
oss << (valid ? "[V" : "[I");
oss << (modified ? "M" : " ");
oss << std::setw(6) << std::setfill(' ') << lid;
oss << "]";
std::cout << oss.str() << "\t";
}
std::cout << std::endl;
}
static void PrintLfuCountVector(const std::vector<uint16_t>& v,
const std::string& name,
int padding = 0,
int value_per_line = 8)
{
std::cout << std::string(padding, ' ') << "vector " << name << " length=" << v.size() << " :";
int scale = 0;
for (size_t i = 0; i < v.size(); i++) {
if (i % value_per_line == 0) std::cout << std::endl << std::string(padding + 2, ' ');
int lfu_count = v[i] & ((1U << 14) - 1);
std::cout << lfu_count << "\t";
if (v[i] & (1U << 14)) { scale |= (1U << i); }
}
std::cout << std::endl;
std::cout << std::string(padding + 2, ' ') << "lfu scale = " << scale << std::endl;
}
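// Host-side description of one 32-way cache set. Tag word layout: bits 0-13
// hold the local id, bit 14 the valid flag, bit 15 the modified flag. LFU
// word layout: bits 0-13 hold the scaled counter, and bit 14 of lane i
// contributes bit i of the per-set counter scale.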
struct SingleCacheSetTestParam {
std::vector<int> cache_tag_lids = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
std::vector<bool> cache_tag_valid = {false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false};
std::vector<bool> cache_tag_modified = {false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false};
std::vector<int> cache_lfu_count = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int cache_lfu_count_scale = 0;
int cache_set_coverage = 64;
std::vector<int64_t> raw_counter = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int64_t start_gid = 0;
int to_update_id_count = 10;
std::vector<int64_t> to_update_gids = {3, 51, 20, 13, 60, 44, 37, 46, 12, 41};
std::vector<int> to_update_inc_count = {1, 2, 1, 4, 5, 6, 3, 1, 1, 6};
std::string name = "default";
SingleCacheSetTestParam& Random(int base_scale, int coverage, int update_count)
{
EXPECT_LE(update_count, coverage);
cache_set_coverage = coverage;
name = "random";
cache_lfu_count_scale = base_scale;
std::vector<int> lid_perm(coverage);
for (int i = 0; i < coverage; i++)
lid_perm[i] = i;
std::shuffle(lid_perm.begin(), lid_perm.end(), std::mt19937(std::random_device()()));
raw_counter.clear();
raw_counter.resize(coverage, -1);
for (int i = 0; i < 32 && i < coverage; i++) {
cache_tag_lids[i] = lid_perm[i];
int mask = std::experimental::randint(0, 3);
cache_tag_valid[i] = mask & 1;
cache_tag_modified[i] = mask & 2;
cache_lfu_count[i] = std::experimental::randint(1, 16383);
if (cache_tag_valid[i]) {
raw_counter[cache_tag_lids[i]] =
((int64_t)cache_lfu_count[i] << cache_lfu_count_scale) +
std::experimental::randint(0, (1 << cache_lfu_count_scale) - 1);
}
}
for (int i = 0; i < coverage; i++) {
if (raw_counter[i] == -1) {
raw_counter[i] =
std::experimental::randint<int64_t>(0, 1 << (14LL + (int64_t)cache_lfu_count_scale));
}
}
start_gid = std::experimental::randint<int64_t>(0, 1000000000LL) * coverage;
to_update_id_count = update_count;
to_update_gids.resize(to_update_id_count);
to_update_inc_count.resize(to_update_id_count);
std::shuffle(lid_perm.begin(), lid_perm.end(), std::mt19937(std::random_device()()));
for (int i = 0; i < to_update_id_count; i++) {
to_update_gids[i] = lid_perm[i] + start_gid;
to_update_inc_count[i] = std::experimental::randint(1, 16383);
}
return *this;
}
void Print() const
{
std::cout << "Test Parameter for " << name << ":" << std::endl;
std::cout << " "
<< "old_scale=" << cache_lfu_count_scale
<< ", cache_set_coverage=" << cache_set_coverage << ", start_gid=" << start_gid
<< ", to_update_id_count=" << to_update_id_count << std::endl;
PrintVector(cache_tag_lids, "cache_tag_lids", 2);
PrintVector(cache_tag_valid, "cache_tag_valid", 2);
PrintVector(cache_tag_modified, "cache_tag_modified", 2);
PrintVector(cache_lfu_count, "cache_lfu_count", 2);
PrintVector(raw_counter, "raw_counter", 2);
PrintVector(to_update_gids, "to_update_gids", 2);
PrintVector(to_update_inc_count, "to_update_inc_count", 2);
}
};
void PrintInfo(const SingleCacheSetTestParam& test_param,
const std::vector<uint16_t>& cache_tag_vec_updated,
const std::vector<uint16_t>& cache_lfu_vec_updated,
const std::vector<int64_t>& raw_counter_updated,
const std::vector<int64_t>& load_to_cache_ids,
const std::vector<int64_t>& write_back_ids)
{
std::cout << "\nTesting case " << test_param.name << std::endl;
test_param.Print();
std::cout << "*********************** Results ***********************" << std::endl;
PrintTagVector(cache_tag_vec_updated, "cache_tag_vec_updated");
PrintLfuCountVector(cache_lfu_vec_updated, "cache_lfu_vec_updated");
PrintVector(raw_counter_updated, "raw_counter_updated");
PrintVector(load_to_cache_ids, "load_to_cache_ids");
PrintVector(write_back_ids, "write_back_ids");
}
bool CheckSingleResult(const SingleCacheSetTestParam& test_param,
const std::vector<uint16_t>& cache_tag_vec_updated,
const std::vector<uint16_t>& cache_lfu_vec_updated,
const std::vector<int64_t>& raw_counter_updated,
const std::vector<int64_t>& load_to_cache_ids,
const std::vector<int64_t>& write_back_ids)
{
std::vector<int> updated_tag_lids(32);
std::vector<bool> updated_valid(32);
std::vector<bool> updated_modified(32);
std::vector<int> updated_lfu_count(32);
int update_scale = 0;
EXPECT_EQ(cache_tag_vec_updated.size(), 32);
EXPECT_EQ(cache_lfu_vec_updated.size(), 32);
std::map<int, int> old_lid_to_cacheline;
for (int i = 0; i < 32; i++) {
if (test_param.cache_tag_valid[i]) {
old_lid_to_cacheline.insert(std::pair<int, int>(test_param.cache_tag_lids[i], i));
}
}
// extract results from cache data
int in_cache_count = 0;
std::set<int> updated_lid_dedup_set;
for (int i = 0; i < cache_tag_vec_updated.size(); i++) {
int lid = cache_tag_vec_updated[i] & ((1 << 14) - 1);
bool valid = cache_tag_vec_updated[i] & (1 << 14);
if (!valid) lid = -1;
bool modified = cache_tag_vec_updated[i] & (1 << 15);
if (!valid) EXPECT_FALSE(modified);
int lfu_count = cache_lfu_vec_updated[i] & ((1 << 14) - 1);
if (!valid) lfu_count = -1;
if ((cache_lfu_vec_updated[i] & (1 << 14)) != 0) { update_scale |= (1 << i); }
if (valid) {
EXPECT_EQ(updated_lid_dedup_set.find(lid), updated_lid_dedup_set.end());
updated_lid_dedup_set.insert(lid);
in_cache_count++;
}
updated_tag_lids[i] = lid;
updated_valid[i] = valid;
updated_modified[i] = modified;
updated_lfu_count[i] = lfu_count;
}
// Check ids in cache set are corrected
std::map<int, int64_t> lid_to_count;
std::map<int, int> base_lid_to_cache_line;
for (int i = 0; i < 32; i++) {
if (test_param.cache_tag_valid[i]) {
EXPECT_GE(test_param.cache_tag_lids[i], 0);
base_lid_to_cache_line.insert(std::pair<int, int>(test_param.cache_tag_lids[i], i));
int lid = test_param.cache_tag_lids[i];
int64_t count = (static_cast<int64_t>(test_param.cache_lfu_count[i] + 1)
<< test_param.cache_lfu_count_scale) -
1;
lid_to_count[test_param.cache_tag_lids[i]] = count;
}
}
std::vector<int64_t> ref_counter_updated = test_param.raw_counter;
std::map<int, int> to_process_lid_to_array_id;
for (int i = 0; i < test_param.to_update_id_count; i++) {
int64_t gid = test_param.to_update_gids[i];
int lid = gid - test_param.start_gid;
int inc_count = test_param.to_update_inc_count[i];
ref_counter_updated[lid] += inc_count;
lid_to_count[lid] = ref_counter_updated[lid];
to_process_lid_to_array_id.insert(std::pair<int, int>(lid, i));
}
std::vector<std::tuple<int64_t, int>> count_lid_vec;
for (auto& lid_count : lid_to_count) {
count_lid_vec.push_back(std::tuple<int64_t, int>(lid_count.second, lid_count.first));
}
std::sort(count_lid_vec.begin(), count_lid_vec.end(), std::greater{});
int64_t max_lfu_count = std::get<0>(count_lid_vec.front());
int reference_updated_scale = 0;
while ((max_lfu_count >> reference_updated_scale) >= (1 << 14)) {
reference_updated_scale++;
}
EXPECT_EQ(reference_updated_scale, update_scale);
// Compute reference ids
std::set<int> must_in_cache, maybe_in_cache;
int should_in_cache_count = 32;
if (count_lid_vec.size() < 32) {
for (auto& t : count_lid_vec)
must_in_cache.insert(std::get<1>(t));
should_in_cache_count = count_lid_vec.size();
} else {
int64_t min_count = std::get<0>(count_lid_vec[32 - 1]);
for (auto& t : count_lid_vec) {
if (std::get<0>(t) > min_count) {
must_in_cache.insert(std::get<1>(t));
} else {
maybe_in_cache.insert(std::get<1>(t));
}
}
}
EXPECT_EQ(should_in_cache_count, in_cache_count);
// Check all must_in_cache lids are in cache.
for (auto must_in_lid : must_in_cache) {
EXPECT_NE(updated_lid_dedup_set.find(must_in_lid), updated_lid_dedup_set.end());
}
// Check all cached elements are in must_in_cache or maybe_in_cache
for (int i = 0; i < 32; i++) {
if (!updated_valid[i]) continue;
int lid = updated_tag_lids[i];
EXPECT_TRUE(must_in_cache.find(lid) != must_in_cache.end() ||
maybe_in_cache.find(lid) != maybe_in_cache.end());
auto it = old_lid_to_cacheline.find(lid);
// same lid won't change location.
if (it != old_lid_to_cacheline.end()) {
EXPECT_EQ(it->second, i);
EXPECT_EQ(test_param.cache_tag_modified[i], updated_modified[i]);
int64_t old_est_lfu_count =
(((int64_t)test_param.cache_lfu_count[i] + 1) << test_param.cache_lfu_count_scale) - 1;
auto it = to_process_lid_to_array_id.find(lid);
if (it != to_process_lid_to_array_id.end()) {
int64_t updated_est_lfu_count =
test_param.raw_counter[lid] + test_param.to_update_inc_count[it->second];
EXPECT_EQ(updated_est_lfu_count >> reference_updated_scale, updated_lfu_count[i])
<< "Index=" << i << ", lid=" << lid;
} else {
EXPECT_EQ(old_est_lfu_count >> reference_updated_scale, updated_lfu_count[i])
<< "Index=" << i << ", lid=" << lid;
}
} else {
EXPECT_FALSE(updated_modified[i]) << "Index=" << i << ", lid=" << lid;
auto it = to_process_lid_to_array_id.find(lid);
EXPECT_NE(it, to_process_lid_to_array_id.end());
int64_t updated_est_lfu_count =
test_param.raw_counter[lid] + test_param.to_update_inc_count[it->second];
EXPECT_EQ(updated_est_lfu_count >> reference_updated_scale, updated_lfu_count[i]);
}
}
// Check all counters are right.
for (int i = 0; i < test_param.cache_set_coverage; i++) {
EXPECT_EQ(ref_counter_updated[i], raw_counter_updated[i]);
}
// load and writeback check
int ld_count = 0, wb_count = 0;
for (int i = 0; i < 32; i++) {
if (updated_valid[i] &&
(!test_param.cache_tag_valid[i] || test_param.cache_tag_lids[i] != updated_tag_lids[i])) {
EXPECT_GT(load_to_cache_ids.size(), ld_count);
EXPECT_EQ(load_to_cache_ids[ld_count], updated_tag_lids[i] + test_param.start_gid)
<< " index=" << ld_count;
ld_count++;
}
if (test_param.cache_tag_valid[i] && test_param.cache_tag_modified[i] &&
updated_tag_lids[i] != test_param.cache_tag_lids[i]) {
EXPECT_GT(write_back_ids.size(), wb_count);
EXPECT_EQ(write_back_ids[wb_count], test_param.cache_tag_lids[i] + test_param.start_gid)
<< " index=" << wb_count;
wb_count++;
}
}
if (testing::Test::HasFailure()) { return false; }
return true;
}
class CacheSetSingleCaseTests : public ::testing::TestWithParam<SingleCacheSetTestParam> {};
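// Run a single 32-thread block (one thread per cache line): load the cache
// set, apply the LFU updates with CacheSetUpdater, and store the updated
// tags/counters along with the ids to load into or write back from the cache.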
__global__ void SingleCacheSetTestKernel(uint16_t* cache_set_tag_ptr,
uint16_t* cache_set_count_ptr,
int64_t* memory_lfu_counter,
const int64_t* gids,
const int* inc_count,
int64_t* need_load_to_cache_ids,
int64_t* need_write_back_ids,
int64_t set_start_id,
int id_count)
{
using Updater = wholememory_ops::CacheSetUpdater<int64_t>;
__shared__ Updater::TempStorage temp_storage;
wholememory_ops::CacheLineInfo cache_line_info;
cache_line_info.LoadInfo(cache_set_tag_ptr, cache_set_count_ptr);
Updater updater;
updater.UpdateCache<true, true>(temp_storage,
cache_line_info,
memory_lfu_counter,
gids,
inc_count,
need_load_to_cache_ids,
need_write_back_ids,
set_start_id,
id_count);
cache_line_info.StoreInfo(cache_set_tag_ptr, cache_set_count_ptr);
}
TEST_P(CacheSetSingleCaseTests, CacheSetTest)
{
static constexpr int kCacheSetSize = 32;
auto params = GetParam();
int dev_count;
EXPECT_EQ(cudaGetDeviceCount(&dev_count), cudaSuccess);
EXPECT_GE(dev_count, 1);
EXPECT_EQ(cudaSetDevice(0), cudaSuccess);
EXPECT_EQ(kCacheSetSize, params.cache_tag_lids.size());
EXPECT_EQ(kCacheSetSize, params.cache_tag_valid.size());
EXPECT_EQ(kCacheSetSize, params.cache_tag_modified.size());
EXPECT_EQ(kCacheSetSize, params.cache_lfu_count.size());
EXPECT_EQ(params.cache_set_coverage, params.raw_counter.size());
EXPECT_EQ(params.to_update_id_count, params.to_update_gids.size());
EXPECT_EQ(params.to_update_id_count, params.to_update_inc_count.size());
uint16_t *cache_tag_ptr, *cache_lfu_ptr;
EXPECT_EQ(cudaMalloc(&cache_tag_ptr, sizeof(uint16_t) * kCacheSetSize), cudaSuccess);
EXPECT_EQ(cudaMalloc(&cache_lfu_ptr, sizeof(uint16_t) * kCacheSetSize), cudaSuccess);
int64_t *to_update_ids, *write_back_ids, *load_to_cache_ids;
size_t update_ids_size = sizeof(int64_t) * params.to_update_id_count;
EXPECT_EQ(cudaMalloc(&to_update_ids, update_ids_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&write_back_ids, update_ids_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&load_to_cache_ids, update_ids_size), cudaSuccess);
int* inc_count;
EXPECT_EQ(cudaMalloc(&inc_count, sizeof(int) * params.to_update_id_count), cudaSuccess);
int64_t* raw_counter_ptr;
EXPECT_EQ(cudaMalloc(&raw_counter_ptr, sizeof(int64_t) * params.cache_set_coverage), cudaSuccess);
std::vector<int16_t> cache_tag_vec(kCacheSetSize, 0), cache_lfu_vec(kCacheSetSize, 0);
for (int i = 0; i < kCacheSetSize; i++) {
uint16_t tag_data = params.cache_tag_valid[i] ? params.cache_tag_lids[i] : 0;
uint16_t valid_modify_mask = 0;
if (params.cache_tag_valid[i]) valid_modify_mask |= (1U << 14U);
if (params.cache_tag_modified[i]) valid_modify_mask |= (1U << 15U);
tag_data |= valid_modify_mask;
cache_tag_vec[i] = tag_data;
uint32_t scale = params.cache_lfu_count_scale;
uint16_t lfu_count_data = params.cache_lfu_count[i];
if (scale & (1U << i)) { lfu_count_data |= (1U << 14U); }
cache_lfu_vec[i] = lfu_count_data;
}
EXPECT_EQ(
cudaMemcpy(
cache_tag_ptr, cache_tag_vec.data(), kCacheSetSize * sizeof(int16_t), cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(
cudaMemcpy(
cache_lfu_ptr, cache_lfu_vec.data(), kCacheSetSize * sizeof(int16_t), cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(raw_counter_ptr,
params.raw_counter.data(),
params.cache_set_coverage * sizeof(int64_t),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(to_update_ids,
params.to_update_gids.data(),
params.to_update_id_count * sizeof(int64_t),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(inc_count,
params.to_update_inc_count.data(),
params.to_update_id_count * sizeof(int),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
SingleCacheSetTestKernel<<<1, kCacheSetSize>>>(cache_tag_ptr,
cache_lfu_ptr,
raw_counter_ptr,
to_update_ids,
inc_count,
load_to_cache_ids,
write_back_ids,
params.start_gid,
params.to_update_id_count);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
std::vector<uint16_t> cache_tag_vec_updated(kCacheSetSize), cache_lfu_vec_updated(kCacheSetSize);
std::vector<int64_t> raw_counter_updated(params.cache_set_coverage);
std::vector<int64_t> load_to_cache_ids_vec(params.to_update_id_count),
writeback_ids_vec(params.to_update_id_count);
EXPECT_EQ(cudaMemcpy(cache_tag_vec_updated.data(),
cache_tag_ptr,
kCacheSetSize * sizeof(uint16_t),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(cache_lfu_vec_updated.data(),
cache_lfu_ptr,
kCacheSetSize * sizeof(uint16_t),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(raw_counter_updated.data(),
raw_counter_ptr,
params.cache_set_coverage * sizeof(int64_t),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(load_to_cache_ids_vec.data(),
load_to_cache_ids,
params.to_update_id_count * sizeof(int64_t),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(writeback_ids_vec.data(),
write_back_ids,
params.to_update_id_count * sizeof(int64_t),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
bool success = CheckSingleResult(params,
cache_tag_vec_updated,
cache_lfu_vec_updated,
raw_counter_updated,
load_to_cache_ids_vec,
writeback_ids_vec);
if (!success) {
PrintInfo(params,
cache_tag_vec_updated,
cache_lfu_vec_updated,
raw_counter_updated,
load_to_cache_ids_vec,
writeback_ids_vec);
}
EXPECT_EQ(cudaFree(cache_tag_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(cache_lfu_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(to_update_ids), cudaSuccess);
EXPECT_EQ(cudaFree(write_back_ids), cudaSuccess);
EXPECT_EQ(cudaFree(load_to_cache_ids), cudaSuccess);
EXPECT_EQ(cudaFree(inc_count), cudaSuccess);
EXPECT_EQ(cudaFree(raw_counter_ptr), cudaSuccess);
}
static SingleCacheSetTestParam modify_test{
.cache_tag_lids = {1, 3, 5, 9, 11, 13, 15, 17, 19, 21, 23, 25, 26, 28, 51, 60,
2, 4, 8, 10, 12, 14, 6, 18, 20, 22, 24, 0, 27, 40, 54, 62},
.cache_tag_valid = {true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true},
.cache_tag_modified = {false, true, false, true, false, true, false, false, false, true, true,
false, true, true, true, false, false, false, true, false, true, false,
true, false, false, true, false, true, true, true, true, false},
.cache_lfu_count = {1023, 435, 435, 23, 981, 13012, 231, 523, 1227, 8005, 324,
328, 443, 134, 218, 435, 32, 1324, 1112, 98, 1021, 1992,
4032, 747, 382, 1211, 832, 5123, 56, 1212, 622, 646},
.cache_lfu_count_scale = 0,
.cache_set_coverage = 64,
.raw_counter = {5123, 1023, 32, 435, 1324, 435, 4032, 245, 1112, 23, 98, 981, 1021,
13012, 1992, 231, 383, 523, 747, 1227, 382, 8005, 1211, 324, 832, 328,
443, 56, 134, 543, 998, 1768, 1211, 1321, 223, 148, 1234, 1211, 832,
1437, 1212, 82, 1080, 345, 1643, 1432, 424, 567, 839, 911, 493, 218,
1821, 921, 622, 1718, 428, 1283, 1198, 2355, 435, 864, 646, 346},
.start_gid = 128,
.to_update_id_count = 10,
.to_update_gids = {129, 157, 141, 177, 149, 182, 138, 191, 134, 144},
.to_update_inc_count = {53, 128, 4323, 321, 493, 232, 98, 43, 22, 134},
.name = "modify_test"};
static SingleCacheSetTestParam small_test{
.cache_tag_lids = {0, -1, -1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 0, -1, -1, -1, -1},
.cache_tag_valid = {false, false, false, false, false, true, false, false, false, true, false,
false, false, false, false, false, false, false, false, false, false, false,
true, false, false, false, false, true, false, false, false, false},
.cache_tag_modified = {false, false, false, false, false, true, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, true, false, false, false, false},
.cache_lfu_count = {0, 0, 0, 0, 0, 13012, 0, 0, 0, 2005, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4032, 0, 0, 0, 0, 5123, 0, 0, 0, 0},
.cache_lfu_count_scale = 0,
.cache_set_coverage = 5,
.raw_counter = {5123, 4032, 13012, 2005, 1324},
.start_gid = 100,
.to_update_id_count = 3,
.to_update_gids = {100, 103, 104},
.to_update_inc_count = {53, 128, 4323},
.name = "small_test"};
static SingleCacheSetTestParam medium_test{
.cache_tag_lids = {0, 0, 0, -1, -1, 13, -1, -1, -1, 21, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, 0, -1, -1, -1, -1},
.cache_tag_valid = {false, false, false, false, false, true, false, false, false, true, false,
false, false, false, false, false, false, false, false, false, false, false,
true, false, false, false, false, true, false, false, false, false},
.cache_tag_modified = {false, false, false, false, false, true, false, false,
false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
false, false, false, true, false, false, false, false},
.cache_lfu_count = {0, 0, 0, 0, 0, 13012, 0, 0, 0, 2005, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4032, 0, 0, 0, 0, 5123, 0, 0, 0, 0},
.cache_lfu_count_scale = 0,
.cache_set_coverage = 64,
.raw_counter = {5123, 1023, 32, 435, 1324, 435, 4032, 245, 1112, 23, 98, 981, 1021,
13012, 1992, 231, 383, 523, 747, 1227, 382, 8005, 1211, 324, 832, 328,
443, 56, 134, 543, 998, 1768, 1211, 1321, 223, 148, 1234, 1211, 832,
1437, 1212, 82, 1080, 345, 1643, 1432, 424, 567, 839, 911, 493, 218,
1821, 921, 622, 1718, 428, 1283, 1198, 2355, 435, 864, 646, 346},
.start_gid = 128,
.to_update_id_count = 10,
.to_update_gids = {129, 157, 141, 177, 149, 182, 138, 191, 134, 144},
.to_update_inc_count = {53, 128, 4323, 321, 493, 232, 98, 43, 22, 134},
.name = "medium_test"};
static SingleCacheSetTestParam large_test{
.cache_tag_lids = {1, 3, 5, 9, 11, 13, 15, 17, 19, 21, 23, 25, 26, 28, 51, 60,
2, 4, 8, 10, 12, 14, 6, 18, 20, 22, 24, 0, 27, 40, 54, 62},
.cache_tag_valid = {true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true},
.cache_tag_modified = {true, false, false, true, false, true, false, false, true, true, true,
false, true, true, true, false, false, false, true, false, true, false,
true, false, false, true, false, true, true, true, true, false},
.cache_lfu_count = {16052, 15443, 14375, 12384, 12981, 13012, 15231, 15523, 14323, 10212, 13101,
12320, 10443, 14134, 16118, 14350, 14935, 13244, 11112, 11098, 11021, 11992,
14032, 13747, 13387, 12117, 11832, 15123, 15456, 13212, 13622, 11646},
.cache_lfu_count_scale = 0,
.cache_set_coverage = 564,
.raw_counter =
{5123, 15443, 32, 435, 1324, 435, 4032, 245, 1112, 23, 98, 981, 1021, 13012, 1992,
231, 383, 523, 747, 1227, 382, 8005, 1211, 324, 832, 328, 443, 56, 134, 543,
998, 1768, 1211, 1321, 223, 148, 1234, 1211, 832, 1437, 1212, 82, 1080, 345, 1643,
1432, 424, 567, 839, 911, 493, 218, 1821, 921, 622, 1718, 428, 1283, 1198, 2355,
435, 864, 646, 346, 5648, 4585, 8582, 719, 2589, 8971, 2506, 8304, 1806, 227, 8888,
1768, 2639, 39, 3973, 459, 4894, 5951, 3781, 4742, 1164, 1816, 7519, 8851, 9220, 2633,
2190, 786, 3953, 6205, 1906, 5076, 5105, 4325, 7744, 3166, 8925, 1643, 7225, 8278, 7081,
2416, 1508, 3839, 6741, 9856, 6771, 9414, 7760, 3782, 882, 8318, 3215, 2138, 4395, 8355,
731, 7970, 5753, 6595, 330, 7038, 4197, 1208, 7511, 6385, 4576, 7625, 8684, 522, 1363,
3336, 2904, 286, 2893, 4189, 8698, 7908, 7670, 6578, 5431, 8368, 8182, 6825, 1831, 6060,
2107, 1196, 2364, 2289, 4702, 6030, 5318, 4248, 1554, 3205, 8072, 3107, 3334, 3824, 7651,
5500, 2051, 6175, 9526, 9888, 253, 6108, 4949, 4317, 485, 4694, 9210, 1214, 4878, 6513,
6727, 85, 8023, 691, 794, 1345, 5172, 7539, 4341, 566, 5865, 4521, 5674, 6127, 3323,
6815, 6906, 6668, 2588, 3545, 3480, 6000, 7982, 633, 1353, 7631, 248, 4287, 1870, 8083,
5187, 1994, 9851, 2221, 4809, 5023, 1057, 7305, 6315, 837, 7281, 1743, 5507, 9712, 5893,
5959, 7229, 7949, 1656, 8973, 7083, 8529, 7699, 2690, 9797, 2200, 4344, 6601, 1239, 6522,
4689, 2285, 6114, 8070, 1398, 9452, 7836, 5294, 6305, 4656, 804, 6852, 4307, 4720, 9817,
2964, 2327, 5876, 9681, 8494, 7591, 9407, 1599, 3805, 4251, 8182, 5127, 5512, 7352, 1761,
4268, 1220, 4626, 6586, 8623, 8392, 1698, 7226, 4468, 5713, 9219, 176, 2160, 552, 8808,
9251, 2118, 9069, 7314, 7776, 3941, 3027, 1670, 3710, 2968, 4877, 6135, 2737, 732, 1681,
9418, 4751, 9061, 8579, 9087, 917, 7513, 2906, 1168, 4364, 9368, 4622, 3492, 2210, 505,
1779, 1494, 519, 4478, 7958, 9938, 6636, 6081, 8852, 8518, 3094, 5616, 4802, 7196, 332,
7204, 2291, 2626, 1247, 6454, 7642, 1189, 4454, 4819, 9813, 9771, 7137, 4721, 9919, 8594,
8443, 8574, 5554, 3104, 1200, 3118, 9787, 9503, 5527, 3047, 3231, 5714, 4561, 9928, 3954,
4496, 8760, 1700, 1984, 9969, 2750, 5143, 2176, 2012, 7975, 841, 1727, 6338, 7469, 9428,
5328, 8245, 8744, 1169, 9707, 5258, 3199, 1288, 3505, 5490, 2387, 7191, 3003, 6768, 7669,
3634, 1037, 6794, 7834, 174, 3692, 7498, 6992, 8051, 2786, 759, 4912, 5601, 9623, 529,
411, 9399, 1185, 6379, 2103, 165, 3169, 2145, 5374, 3762, 2752, 8824, 8635, 3801, 5684,
490, 4675, 3574, 8381, 4193, 3081, 3374, 5280, 5990, 2192, 1754, 1209, 6229, 1927, 9347,
2760, 7652, 1500, 7904, 4155, 6215, 4964, 2274, 4844, 8865, 4909, 6082, 5330, 9388, 5218,
2322, 5076, 1342, 4081, 9607, 1181, 2113, 3462, 8662, 6134, 5903, 7965, 5581, 9891, 8705,
5451, 1289, 3383, 3968, 4389, 3168, 922, 9339, 7622, 9600, 5754, 3631, 8317, 3698, 4132,
241, 4686, 4280, 149, 9151, 7115, 2583, 2077, 2544, 9998, 2736, 8915, 3767, 3640, 360,
5059, 7880, 921, 8832, 9432, 6848, 5673, 1247, 992, 5988, 3913, 2010, 4496, 9034, 844,
8482, 5071, 6610, 750, 9525, 8782, 3453, 5300, 6955, 6986, 678, 5760, 3055, 3352, 7631,
5751, 8796, 9636, 794, 7302, 5804, 2542, 6996, 4498, 6318, 443, 2100, 732, 302, 6633,
8169, 2662, 3265, 7489, 6825, 545, 4222, 2936, 5612, 2039, 8772, 5632, 7284, 679, 7634,
8935, 9315, 8439, 3390, 3, 9367, 3139, 8860, 9408},
.start_gid = 564 * 2,
.to_update_id_count = 301,
.to_update_gids =
{1385, 1257, 1684, 1341, 1302, 1550, 1536, 1665, 1233, 1259, 1452, 1574, 1219, 1637, 1633, 1155,
1660, 1492, 1273, 1612, 1380, 1329, 1650, 1382, 1628, 1520, 1150, 1429, 1526, 1264, 1327, 1339,
1600, 1575, 1506, 1147, 1538, 1358, 1681, 1497, 1365, 1511, 1471, 1209, 1253, 1395, 1611, 1516,
1143, 1229, 1335, 1552, 1639, 1479, 1180, 1352, 1654, 1541, 1314, 1465, 1437, 1469, 1585, 1203,
1666, 1629, 1376, 1576, 1474, 1261, 1610, 1608, 1525, 1514, 1216, 1188, 1231, 1675, 1196, 1531,
1445, 1330, 1548, 1553, 1599, 1535, 1243, 1360, 1417, 1284, 1190, 1510, 1357, 1425, 1228, 1185,
1647, 1502, 1463, 1217, 1234, 1581, 1508, 1202, 1244, 1554, 1447, 1411, 1649, 1591, 1683, 1448,
1475, 1348, 1232, 1182, 1305, 1459, 1495, 1635, 1204, 1603, 1258, 1609, 1291, 1439, 1604, 1205,
1211, 1389, 1183, 1159, 1331, 1176, 1394, 1384, 1595, 1632, 1252, 1313, 1221, 1444, 1315, 1563,
1499, 1436, 1157, 1407, 1614, 1387, 1539, 1274, 1300, 1468, 1248, 1263, 1615, 1236, 1435, 1476,
1513, 1173, 1621, 1477, 1301, 1139, 1207, 1379, 1423, 1277, 1657, 1344, 1561, 1132, 1688, 1247,
1587, 1646, 1582, 1168, 1337, 1212, 1518, 1651, 1359, 1144, 1289, 1275, 1442, 1386, 1171, 1398,
1605, 1408, 1456, 1601, 1362, 1590, 1392, 1427, 1640, 1478, 1440, 1317, 1562, 1524, 1679, 1593,
1371, 1161, 1287, 1271, 1432, 1509, 1268, 1377, 1586, 1519, 1512, 1312, 1624, 1424, 1449, 1673,
1662, 1641, 1189, 1528, 1588, 1627, 1356, 1664, 1149, 1288, 1682, 1517, 1145, 1278, 1192, 1251,
1332, 1197, 1668, 1181, 1151, 1397, 1272, 1129, 1403, 1396, 1690, 1626, 1458, 1678, 1622, 1613,
1466, 1464, 1462, 1166, 1472, 1606, 1616, 1152, 1280, 1583, 1428, 1255, 1167, 1138, 1319, 1325,
1195, 1170, 1529, 1299, 1555, 1617, 1318, 1584, 1638, 1153, 1201, 1218, 1186, 1671, 1369, 1158,
1308, 1136, 1294, 1283, 1661, 1148, 1547, 1311, 1163, 1540, 1644, 1433, 1326},
.to_update_inc_count =
{5981, 9042, 973, 2226, 854, 852, 7049, 7321, 8050, 5163, 6911, 982, 9499, 1054, 4416, 2165,
2370, 368, 6701, 1535, 4436, 5248, 8373, 413, 9714, 4219, 4004, 1499, 1869, 2981, 8502, 7151,
9540, 2599, 675, 9694, 9449, 7763, 340, 8713, 3265, 6000, 457, 3544, 3728, 8080, 885, 212,
5650, 936, 6505, 8174, 1289, 6716, 2555, 5105, 7914, 7139, 4365, 867, 3965, 9160, 4308, 8185,
6733, 295, 5703, 1233, 1911, 8645, 5491, 290, 9863, 7433, 4131, 7492, 9820, 6942, 4029, 5172,
7543, 2824, 7025, 3613, 3949, 2460, 8218, 8850, 1318, 5066, 3412, 4549, 2381, 6508, 2363, 5036,
7648, 8770, 7072, 1420, 3327, 1256, 7764, 7754, 9291, 1269, 536, 6053, 3986, 8556, 2952, 46,
1168, 201, 8149, 6083, 9268, 1812, 8588, 4570, 4264, 879, 2978, 640, 3426, 4150, 3856, 4796,
8111, 107, 3856, 921, 7074, 9830, 6834, 8179, 2449, 8987, 433, 5333, 3101, 4997, 1702, 6988,
8137, 3829, 4773, 8572, 600, 9491, 6046, 3345, 7287, 6231, 1987, 4090, 3120, 4693, 3296, 3413,
8912, 6731, 8819, 3574, 5593, 1252, 5849, 8096, 9798, 2682, 180, 1082, 8307, 66, 8985, 4710,
8863, 5113, 6863, 1711, 470, 455, 920, 8954, 7635, 4959, 2015, 9372, 5116, 1952, 4506, 7781,
5440, 5517, 1597, 4127, 2295, 8661, 5313, 2370, 1825, 8812, 973, 9608, 6754, 6232, 644, 7299,
2651, 6003, 961, 927, 1176, 4088, 6704, 832, 919, 9768, 9495, 9495, 8424, 7835, 5242, 6485,
4592, 1914, 1801, 4748, 9097, 725, 7925, 2211, 6742, 1614, 8052, 8012, 9887, 6160, 9161, 5545,
9814, 3598, 938, 6706, 3872, 1932, 4630, 5308, 487, 7306, 2870, 6778, 4684, 155, 8430, 1234,
3048, 1874, 4456, 2971, 2954, 3132, 3006, 4403, 5876, 1131, 5834, 2547, 4051, 772, 5353, 5751,
968, 3021, 1402, 4198, 98, 7635, 4559, 3067, 8476, 8574, 5676, 1766, 8834, 5284, 7752, 7682,
920, 4078, 4887, 1333, 5714, 7995, 6125, 1293, 2299, 5103, 7710, 3755, 4399},
.name = "large_test"};
static SingleCacheSetTestParam big_number_test{
.cache_tag_lids =
{
3, 16, 6, 21, 10, 13, 9, 17, 24, 30, 27, 7, 28, 1, 5, 18,
11, 12, 22, 4, 25, 8, 23, 20, 15, 32, 31, 2, 0, 19, 29, 26,
},
.cache_tag_valid = {true, true, false, true, false, false, false, true, true, false, false,
false, true, false, true, true, true, true, false, true, true, true,
false, false, true, true, false, true, false, false, true, false},
.cache_tag_modified = {false, true, false, false, false, true, false, false, true, false, true,
true, false, false, true, false, false, true, false, true, true, false,
true, true, false, false, true, true, true, true, true, false},
.cache_lfu_count = {7769, 7418, 15671, 10379, 12640, 4833, 2127, 1920, 9986, 3516, 9790,
15974, 3257, 5591, 487, 3603, 5892, 2805, 12370, 10538, 1750, 1011,
11314, 1826, 6522, 12076, 14259, 394, 1729, 11217, 12869, 15354},
.cache_lfu_count_scale = 0,
.cache_set_coverage = 33,
.raw_counter = {747, 2510, 394, 7769, 10538, 487, 10896, 1907, 1011, 318, 13798,
5892, 2805, 7616, 4431, 6522, 7418, 1920, 3603, 10777, 8169, 10379,
9185, 15366, 9986, 1750, 12121, 12857, 3257, 12869, 6256, 11222, 12076},
.start_gid = 10375660383LL,
.to_update_id_count = 31,
.to_update_gids = {10375660412LL, 10375660389LL, 10375660395LL, 10375660410LL, 10375660396LL,
10375660402LL, 10375660409LL, 10375660390LL, 10375660384LL, 10375660388LL,
10375660408LL, 10375660401LL, 10375660415LL, 10375660394LL, 10375660407LL,
10375660386LL, 10375660387LL, 10375660399LL, 10375660383LL, 10375660414LL,
10375660392LL, 10375660393LL, 10375660406LL, 10375660398LL, 10375660385LL,
10375660397LL, 10375660400LL, 10375660413LL, 10375660403LL, 10375660391LL,
10375660405LL},
.to_update_inc_count = {7199, 7919, 4122, 10946, 11288, 16077, 5925, 5390, 6516, 11525, 3016,
514, 1008, 12538, 14024, 13003, 12927, 4785, 10512, 830, 13930, 9032,
3992, 13998, 5678, 3930, 6631, 13014, 3571, 1769, 14647},
.name = "big_number_test"};
INSTANTIATE_TEST_SUITE_P(CacheSetTest,
CacheSetSingleCaseTests,
::testing::Values(modify_test,
big_number_test,
small_test,
medium_test,
large_test,
SingleCacheSetTestParam().Random(21, 12, 10),
SingleCacheSetTestParam().Random(1, 11, 10),
SingleCacheSetTestParam().Random(11, 10, 10),
SingleCacheSetTestParam().Random(5, 9, 7),
SingleCacheSetTestParam().Random(4, 8, 8),
SingleCacheSetTestParam().Random(24, 7, 5),
SingleCacheSetTestParam().Random(15, 6, 3),
SingleCacheSetTestParam().Random(35, 5, 5),
SingleCacheSetTestParam().Random(27, 4, 4),
SingleCacheSetTestParam().Random(23, 3, 1),
SingleCacheSetTestParam().Random(23, 2, 2),
SingleCacheSetTestParam().Random(23, 1, 1),
SingleCacheSetTestParam().Random(21, 11201, 948),
SingleCacheSetTestParam().Random(0, 123, 31),
SingleCacheSetTestParam().Random(0, 1212, 1002),
SingleCacheSetTestParam().Random(0, 523, 523),
SingleCacheSetTestParam().Random(0, 1001, 3),
SingleCacheSetTestParam().Random(0, 33, 31),
SingleCacheSetTestParam().Random(1, 542, 201),
SingleCacheSetTestParam().Random(5, 123, 31),
SingleCacheSetTestParam().Random(3, 16384, 5000),
SingleCacheSetTestParam().Random(2, 10210, 432),
SingleCacheSetTestParam().Random(10, 15422, 4392),
SingleCacheSetTestParam().Random(11, 9382, 9382),
SingleCacheSetTestParam().Random(6, 8437, 7983),
SingleCacheSetTestParam().Random(18, 832, 38),
SingleCacheSetTestParam().Random(32, 1121, 998),
SingleCacheSetTestParam().Random(35, 3232, 99),
SingleCacheSetTestParam().Random(41, 5242, 422),
SingleCacheSetTestParam().Random(32, 292, 127),
SingleCacheSetTestParam().Random(2, 948, 91),
SingleCacheSetTestParam().Random(11, 3221, 942),
SingleCacheSetTestParam().Random(22, 938, 150),
SingleCacheSetTestParam().Random(21, 12, 10),
SingleCacheSetTestParam().Random(1, 11, 10),
SingleCacheSetTestParam().Random(11, 10, 10),
SingleCacheSetTestParam().Random(5, 9, 7),
SingleCacheSetTestParam().Random(4, 8, 8),
SingleCacheSetTestParam().Random(24, 7, 5),
SingleCacheSetTestParam().Random(15, 6, 3),
SingleCacheSetTestParam().Random(35, 5, 5),
SingleCacheSetTestParam().Random(27, 4, 4),
SingleCacheSetTestParam().Random(23, 3, 1),
SingleCacheSetTestParam().Random(23, 2, 2),
SingleCacheSetTestParam().Random(23, 1, 1),
SingleCacheSetTestParam().Random(21, 11201, 948),
SingleCacheSetTestParam().Random(0, 123, 31),
SingleCacheSetTestParam().Random(0, 1212, 1002),
SingleCacheSetTestParam().Random(0, 523, 523),
SingleCacheSetTestParam().Random(0, 1001, 3),
SingleCacheSetTestParam().Random(0, 33, 31),
SingleCacheSetTestParam().Random(1, 542, 201),
SingleCacheSetTestParam().Random(5, 123, 31),
SingleCacheSetTestParam().Random(3, 16384, 5000),
SingleCacheSetTestParam().Random(2, 10210, 432),
SingleCacheSetTestParam().Random(10, 15422, 4392),
SingleCacheSetTestParam().Random(11, 9382, 9382),
SingleCacheSetTestParam().Random(6, 8437, 7983),
SingleCacheSetTestParam().Random(18, 832, 38),
SingleCacheSetTestParam().Random(32, 1121, 998),
SingleCacheSetTestParam().Random(35, 3232, 99),
SingleCacheSetTestParam().Random(41, 5242, 422),
SingleCacheSetTestParam().Random(32, 292, 127),
SingleCacheSetTestParam().Random(2, 948, 91),
SingleCacheSetTestParam().Random(11, 3221, 942),
SingleCacheSetTestParam().Random(22, 938, 150),
SingleCacheSetTestParam().Random(21, 12, 10),
SingleCacheSetTestParam().Random(1, 11, 10),
SingleCacheSetTestParam().Random(11, 10, 10),
SingleCacheSetTestParam().Random(5, 9, 7),
SingleCacheSetTestParam().Random(4, 8, 8),
SingleCacheSetTestParam().Random(24, 7, 5),
SingleCacheSetTestParam().Random(15, 6, 3),
SingleCacheSetTestParam().Random(35, 5, 5),
SingleCacheSetTestParam().Random(27, 4, 4),
SingleCacheSetTestParam().Random(23, 3, 1),
SingleCacheSetTestParam().Random(23, 2, 2),
SingleCacheSetTestParam().Random(23, 1, 1),
SingleCacheSetTestParam().Random(21, 11201, 948),
SingleCacheSetTestParam().Random(0, 123, 31),
SingleCacheSetTestParam().Random(0, 1212, 1002),
SingleCacheSetTestParam().Random(0, 523, 523),
SingleCacheSetTestParam().Random(0, 1001, 3),
SingleCacheSetTestParam().Random(0, 33, 31),
SingleCacheSetTestParam().Random(1, 542, 201),
SingleCacheSetTestParam().Random(5, 123, 31),
SingleCacheSetTestParam().Random(3, 16384, 5000),
SingleCacheSetTestParam().Random(2, 10210, 432),
SingleCacheSetTestParam().Random(10, 15422, 4392),
SingleCacheSetTestParam().Random(11, 9382, 9382),
SingleCacheSetTestParam().Random(6, 8437, 7983),
SingleCacheSetTestParam().Random(18, 832, 38),
SingleCacheSetTestParam().Random(32, 1121, 998),
SingleCacheSetTestParam().Random(35, 3232, 99),
SingleCacheSetTestParam().Random(41, 5242, 422),
SingleCacheSetTestParam().Random(32, 292, 127),
SingleCacheSetTestParam().Random(2, 948, 91),
SingleCacheSetTestParam().Random(11, 3221, 942),
SingleCacheSetTestParam().Random(22, 938, 150),
SingleCacheSetTestParam().Random(21, 12, 10),
SingleCacheSetTestParam().Random(1, 11, 10),
SingleCacheSetTestParam().Random(11, 10, 10),
SingleCacheSetTestParam().Random(5, 9, 7),
SingleCacheSetTestParam().Random(4, 8, 8),
SingleCacheSetTestParam().Random(24, 7, 5),
SingleCacheSetTestParam().Random(15, 6, 3),
SingleCacheSetTestParam().Random(35, 5, 5),
SingleCacheSetTestParam().Random(27, 4, 4),
SingleCacheSetTestParam().Random(23, 3, 1),
SingleCacheSetTestParam().Random(23, 2, 2),
SingleCacheSetTestParam().Random(23, 1, 1),
SingleCacheSetTestParam().Random(21, 11201, 948),
SingleCacheSetTestParam().Random(0, 123, 31),
SingleCacheSetTestParam().Random(0, 1212, 1002),
SingleCacheSetTestParam().Random(0, 523, 523),
SingleCacheSetTestParam().Random(0, 1001, 3),
SingleCacheSetTestParam().Random(0, 33, 31),
SingleCacheSetTestParam().Random(1, 542, 201),
SingleCacheSetTestParam().Random(5, 123, 31),
SingleCacheSetTestParam().Random(3, 16384, 5000),
SingleCacheSetTestParam().Random(2, 10210, 432),
SingleCacheSetTestParam().Random(10, 15422, 4392),
SingleCacheSetTestParam().Random(11, 9382, 9382),
SingleCacheSetTestParam().Random(6, 8437, 7983),
SingleCacheSetTestParam().Random(18, 832, 38),
SingleCacheSetTestParam().Random(32, 1121, 998),
SingleCacheSetTestParam().Random(35, 3232, 99),
SingleCacheSetTestParam().Random(41, 5242, 422),
SingleCacheSetTestParam().Random(32, 292, 127),
SingleCacheSetTestParam().Random(2, 948, 91),
SingleCacheSetTestParam().Random(11, 3221, 942),
SingleCacheSetTestParam().Random(22, 938, 150),
SingleCacheSetTestParam()));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/wholememory_gather_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_op.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "wholememory/initialize.hpp"
#include "../wholememory/wholememory_test_utils.hpp"
#include "embedding_test_utils.hpp"
static int g_dev_count = 0;
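// Builder-style test parameters: each setter returns *this so cases can be composed inline in
// INSTANTIATE_TEST_SUITE_P. set_embedding_dim also grows embedding_stride / output_stride when
// they would otherwise be smaller than the new dimension.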
typedef struct WholeMemoryGatherTestParam {
wholememory_matrix_description_t get_embedding_desc() const
{
int64_t matrix_sizes[2] = {embedding_entry_count, embedding_dim};
return wholememory_create_matrix_desc(
matrix_sizes, embedding_stride, embedding_storage_offset, embedding_type);
}
wholememory_array_description_t get_indices_desc() const
{
return wholememory_create_array_desc(indices_count, indices_storage_offset, indices_type);
}
wholememory_matrix_description_t get_output_desc() const
{
int64_t output_sizes[2] = {indices_count, embedding_dim};
return wholememory_create_matrix_desc(
output_sizes, output_stride, output_storage_offset, output_type);
}
int64_t get_embedding_granularity() const
{
return embedding_stride * wholememory_dtype_get_element_size(embedding_type);
}
WholeMemoryGatherTestParam& set_memory_type(wholememory_memory_type_t new_memory_type)
{
memory_type = new_memory_type;
return *this;
}
WholeMemoryGatherTestParam& set_memory_location(wholememory_memory_location_t new_memory_location)
{
memory_location = new_memory_location;
return *this;
}
WholeMemoryGatherTestParam& set_entry_count(int64_t entry_count)
{
embedding_entry_count = entry_count;
return *this;
}
WholeMemoryGatherTestParam& set_embedding_dim(int64_t new_embedding_dim)
{
embedding_dim = new_embedding_dim;
if (embedding_stride < embedding_dim) embedding_stride = embedding_dim;
if (output_stride < embedding_dim) output_stride = embedding_dim;
return *this;
}
WholeMemoryGatherTestParam& set_embedding_stride(int64_t new_embedding_stride)
{
embedding_stride = new_embedding_stride;
return *this;
}
WholeMemoryGatherTestParam& set_output_stride(int64_t new_output_stride)
{
output_stride = new_output_stride;
return *this;
}
WholeMemoryGatherTestParam& set_indices_count(int64_t new_indices_count)
{
indices_count = new_indices_count;
return *this;
}
WholeMemoryGatherTestParam& set_embedding_type(wholememory_dtype_t new_embedding_type)
{
embedding_type = new_embedding_type;
return *this;
}
WholeMemoryGatherTestParam& set_indices_type(wholememory_dtype_t new_indices_type)
{
indices_type = new_indices_type;
return *this;
}
WholeMemoryGatherTestParam& set_output_type(wholememory_dtype_t new_output_type)
{
output_type = new_output_type;
return *this;
}
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_DEVICE;
int64_t embedding_entry_count = 1000000LL;
int64_t embedding_dim = 32;
int64_t embedding_stride = 32;
int64_t indices_count = 100000;
int64_t output_stride = 32;
wholememory_dtype_t embedding_type = WHOLEMEMORY_DT_FLOAT;
wholememory_dtype_t indices_type = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_type = WHOLEMEMORY_DT_FLOAT;
int64_t embedding_storage_offset = 0;
int64_t indices_storage_offset = 0;
int64_t output_storage_offset = 0;
} WholeMemoryGatherTestParam;
class WholeMemoryGatherParameterTests
: public ::testing::TestWithParam<WholeMemoryGatherTestParam> {};
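// Gather test flow: each rank fills its local shard of the WholeMemory embedding, generates
// random indices on the host, runs wholememory_gather into a device buffer, and compares the
// result against a reference produced by device_get_expected_embedding.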
TEST_P(WholeMemoryGatherParameterTests, GatherTest)
{
auto params = GetParam();
EXPECT_GE(g_dev_count, 1);
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, g_dev_count);
MultiProcessRun(
g_dev_count,
[¶ms, &pipes](int world_rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
      if (world_rank == 0) GTEST_SKIP_("Skipped: unsupported memory type / location combination.");
return;
}
wholememory_handle_t embedding_handle;
auto embedding_desc = params.get_embedding_desc();
auto indices_desc = params.get_indices_desc();
auto output_desc = params.get_output_desc();
size_t embedding_entry_size = params.get_embedding_granularity();
EXPECT_EQ(wholememory_malloc(&embedding_handle,
wholememory_get_memory_size_from_matrix(&embedding_desc),
wm_comm,
params.memory_type,
params.memory_location,
embedding_entry_size),
WHOLEMEMORY_SUCCESS);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
void *dev_indices = nullptr, *dev_gather_buffer = nullptr, *dev_reference_buffer = nullptr;
void *host_indices = nullptr, *host_gather_buffer = nullptr, *host_reference_buffer = nullptr;
size_t gather_buffer_size = wholememory_get_memory_size_from_matrix(&output_desc);
size_t indices_buffer_size = wholememory_get_memory_size_from_array(&indices_desc);
EXPECT_EQ(cudaMallocHost(&host_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_gather_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_reference_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_gather_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_reference_buffer, gather_buffer_size), cudaSuccess);
wholememory_ops::testing::device_random_init_local_embedding_table(
embedding_handle, embedding_desc, stream);
wholememory_ops::testing::host_random_init_indices(
host_indices, indices_desc, embedding_desc.sizes[0]);
EXPECT_EQ(cudaMemcpyAsync(dev_indices,
host_indices,
wholememory_get_memory_size_from_array(&indices_desc),
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
wholememory_tensor_t embedding_tensor;
wholememory_tensor_description_t embedding_tensor_desc;
wholememory_copy_matrix_desc_to_tensor(&embedding_tensor_desc, &embedding_desc);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&embedding_tensor, embedding_handle, &embedding_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t indices_tensor, output_tensor;
wholememory_tensor_description_t indices_tensor_desc, output_tensor_desc;
wholememory_copy_array_desc_to_tensor(&indices_tensor_desc, &indices_desc);
wholememory_copy_matrix_desc_to_tensor(&output_tensor_desc, &output_desc);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&indices_tensor, dev_indices, &indices_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&output_tensor, dev_gather_buffer, &output_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_gather(embedding_tensor,
indices_tensor,
output_tensor,
wholememory::get_default_env_func(),
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(wholememory_destroy_tensor(indices_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(output_tensor), WHOLEMEMORY_SUCCESS);
wholememory_ops::testing::device_get_expected_embedding(dev_reference_buffer,
output_desc,
embedding_desc.dtype,
dev_indices,
indices_desc,
wholememory::get_default_env_func(),
stream);
EXPECT_EQ(cudaMemcpyAsync(host_gather_buffer,
dev_gather_buffer,
wholememory_get_memory_size_from_matrix(&output_desc),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_reference_buffer,
dev_reference_buffer,
wholememory_get_memory_size_from_matrix(&output_desc),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_ops::testing::host_check_embedding_same(
host_gather_buffer, output_desc, host_reference_buffer, output_desc);
EXPECT_EQ(cudaFreeHost(host_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_gather_buffer), cudaSuccess);
EXPECT_EQ(cudaFree(dev_reference_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_gather_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_reference_buffer), cudaSuccess);
EXPECT_EQ(wholememory_destroy_tensor(embedding_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_free(embedding_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
}
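// Parameter sweep: all three memory types (continuous / chunked / distributed), host and device
// placement, empty index lists, padded embedding and output strides, odd embedding dimensions,
// half-precision embeddings and outputs, and int64 indices.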
INSTANTIATE_TEST_SUITE_P(
WholeMemoryGatherOpTests,
WholeMemoryGatherParameterTests,
::testing::Values(
#if 0
WholeMemoryGatherTestParam()
.set_memory_location(WHOLEMEMORY_ML_DEVICE)
.set_indices_type(WHOLEMEMORY_DT_INT64)
.set_entry_count((1LL << 23LL) + 131)
.set_embedding_dim(1024)
.set_indices_count(100005),
#endif
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_indices_count(0),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_indices_count(0),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_indices_count(0),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_dim(11)
.set_embedding_stride(12)
.set_indices_count(100005),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_dim(11)
.set_embedding_stride(12)
.set_indices_count(100005),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_dim(11)
.set_embedding_stride(12)
.set_indices_count(100005),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(128),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(128),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_embedding_dim(128),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(127),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(127),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_embedding_dim(127),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(129),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(129),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_embedding_dim(129),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_embedding_dim(513),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_dim(513),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_embedding_dim(513),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_output_type(WHOLEMEMORY_DT_HALF),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_indices_type(WHOLEMEMORY_DT_INT64),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_stride(33),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_embedding_stride(33),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_stride(33),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CONTINUOUS).set_output_stride(33),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_CHUNKED).set_output_stride(33),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED).set_output_stride(33),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
WholeMemoryGatherTestParam()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_embedding_type(WHOLEMEMORY_DT_HALF)
.set_embedding_stride(33),
WholeMemoryGatherTestParam().set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)));
class GlobalEnvironment : public ::testing::Environment {
public:
void SetUp() override { g_dev_count = ForkGetDeviceCount(); }
void TearDown() override {}
};
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new GlobalEnvironment);
return RUN_ALL_TESTS();
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/embedding_test_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
namespace testing {
void device_matrix_type_cast(void* dst,
wholememory_matrix_description_t dst_desc,
const void* src,
wholememory_matrix_description_t src_desc,
cudaStream_t stream);
void device_array_type_cast(void* dst,
wholememory_array_description_t dst_desc,
const void* src,
wholememory_array_description_t src_desc,
cudaStream_t stream);
void device_random_init_local_embedding_table(wholememory_handle_t embedding_handle,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
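// Computes the expected gather result for the given indices; used as the reference output in
// the gather tests.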
void device_get_expected_embedding(void* output,
wholememory_matrix_description_t output_desc,
wholememory_dtype_t embedding_dtype,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
/**
 * Randomly generate indices in the range [0, max_indices).
 * @param indices : pointer to the output buffer
 * @param indices_desc : description of the output array
 * @param max_indices : exclusive upper bound of the generated indices
*/
void host_random_init_indices(void* indices,
wholememory_array_description_t indices_desc,
int64_t max_indices);
void host_check_embedding_same(void* host_embedding,
wholememory_matrix_description_t embedding_desc,
void* host_reference,
wholememory_matrix_description_t reference_desc);
void host_random_init_float(float* data, int64_t len, float max_value, float min_value);
} // namespace testing
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/wholememory_embedding_gradient_apply_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <wholememory/embedding.h>
#include <algorithm>
#include <cmath>
#include <cstring>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "../wholememory/wholememory_test_utils.hpp"
#include "embedding_test_utils.hpp"
#include "wholememory/env_func_ptrs.hpp"
struct EmbeddingBackwardTestParams {
EmbeddingBackwardTestParams()
{
const int64_t kDefaultEmbeddingEntryCount = 400001;
const int64_t kDefaultEmbeddingDim = 127;
const int64_t kDefaultGatherIndiceCount = 100005;
int64_t embedding_sizes[2] = {kDefaultEmbeddingEntryCount, kDefaultEmbeddingDim};
embedding_description = wholememory_create_matrix_desc(
&embedding_sizes[0], kDefaultEmbeddingDim, 0, WHOLEMEMORY_DT_FLOAT);
indice_description =
wholememory_create_array_desc(kDefaultGatherIndiceCount, 0, WHOLEMEMORY_DT_INT64);
int64_t output_sizes[2] = {kDefaultGatherIndiceCount, kDefaultEmbeddingDim};
grad_description = wholememory_create_matrix_desc(
&output_sizes[0], kDefaultEmbeddingDim, 0, WHOLEMEMORY_DT_FLOAT);
}
bool is_large_test()
{
int64_t embedding_table_mem_size =
wholememory_get_memory_element_count_from_matrix(&embedding_description) *
wholememory_dtype_get_element_size(embedding_description.dtype);
if (embedding_table_mem_size > 2LL * 1024 * 1024 * 1024) return true;
return false;
}
EmbeddingBackwardTestParams& set_entry_count(int64_t entry_count)
{
embedding_description.sizes[0] = entry_count;
return *this;
}
EmbeddingBackwardTestParams& set_embedding_dim(int embedding_dim)
{
embedding_description.sizes[1] = embedding_dim;
grad_description.sizes[1] = embedding_dim;
embedding_description.stride = embedding_dim;
if (grad_description.stride < embedding_dim) grad_description.stride = embedding_dim;
return *this;
}
EmbeddingBackwardTestParams& set_indice_count(int indice_count)
{
indice_description.size = indice_count;
grad_description.sizes[0] = indice_count;
return *this;
}
EmbeddingBackwardTestParams& set_indice_dtype(wholememory_dtype_t dtype)
{
indice_description.dtype = dtype;
return *this;
}
EmbeddingBackwardTestParams& set_grad_stride(int stride)
{
grad_description.stride = stride;
return *this;
}
EmbeddingBackwardTestParams& set_memory_type(wholememory_memory_type_t mt)
{
memory_type = mt;
return *this;
}
EmbeddingBackwardTestParams& set_memory_location(wholememory_memory_location_t ml)
{
memory_location = ml;
return *this;
}
EmbeddingBackwardTestParams& set_cache_memory_type(wholememory_memory_type_t cmt)
{
cache_memory_type = cmt;
return *this;
}
EmbeddingBackwardTestParams& set_cache_memory_location(wholememory_memory_location_t cml)
{
cache_memory_location = cml;
return *this;
}
EmbeddingBackwardTestParams& set_cache_ratio(float ratio)
{
cache_ratio = ratio;
return *this;
}
wholememory_embedding_cache_policy_t get_cache_policy(wholememory_comm_t comm)
{
wholememory_embedding_cache_policy_t cache_policy = nullptr;
if (!use_cache) return nullptr;
EXPECT_EQ(wholememory_create_embedding_cache_policy(&cache_policy,
comm,
cache_memory_type,
cache_memory_location,
WHOLEMEMORY_AT_READWRITE,
cache_ratio),
WHOLEMEMORY_SUCCESS);
return cache_policy;
}
EmbeddingBackwardTestParams& set_use_cache()
{
use_cache = true;
return *this;
}
EmbeddingBackwardTestParams& set_run_count(int rc)
{
run_count = rc;
return *this;
}
EmbeddingBackwardTestParams& set_optimizer_type(wholememory_optimizer_type_t opt_type)
{
optimizer_type = opt_type;
return *this;
}
EmbeddingBackwardTestParams& set_optimizer_params(const std::string& param_name, float value)
{
optimizer_params[param_name] = value;
return *this;
}
EmbeddingBackwardTestParams& set_lr(const std::string& param_name, float lr)
{
lr_ = lr;
return *this;
}
wholememory_array_description_t indice_description;
wholememory_matrix_description_t embedding_description;
wholememory_matrix_description_t grad_description;
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_HOST;
wholememory_memory_type_t cache_memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t cache_memory_location = WHOLEMEMORY_ML_DEVICE;
wholememory_optimizer_type_t optimizer_type = WHOLEMEMORY_OPT_SGD;
float cache_ratio = 0.2;
bool use_cache = false;
int run_count = 1;
float lr_ = 0.1;
std::map<std::string, float> optimizer_params;
};
class WholeMemoryEmbeddingBackwardParameterTests
: public ::testing::TestWithParam<EmbeddingBackwardTestParams> {};
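// CPUOptimizer replays the optimizer updates on the host for entries in [start_entry, end_entry)
// and serves as the reference implementation for SGD, AdaGrad, RMSProp and lazy Adam.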
class CPUOptimizer {
public:
CPUOptimizer(EmbeddingBackwardTestParams* params, int64_t start_entry, int64_t end_entry)
: params_(params), start_entry_(start_entry), end_entry_(end_entry)
{
parse_params();
create_optimizer_states();
}
~CPUOptimizer() = default;
void Apply(float lr,
const std::vector<int64_t>& indices,
const std::vector<std::vector<float>>& grads,
std::vector<std::vector<float>>& embs)
{
for (int64_t i = 0; i < indices.size(); i++) {
int64_t index = indices[i];
int64_t local_index = index - start_entry_;
auto& grad_vec = grads[i];
auto& emb_vec = embs[index];
switch (params_->optimizer_type) {
case WHOLEMEMORY_OPT_LAZY_ADAM: {
ApplyLazyAdam(lr, local_index, grad_vec, emb_vec);
break;
}
case WHOLEMEMORY_OPT_SGD: {
ApplySGD(lr, local_index, grad_vec, emb_vec);
break;
}
case WHOLEMEMORY_OPT_ADAGRAD: {
ApplyAdaGrad(lr, local_index, grad_vec, emb_vec);
break;
}
case WHOLEMEMORY_OPT_RMSPROP: {
ApplyRMSProp(lr, local_index, grad_vec, emb_vec);
break;
}
default: {
FAIL();
}
}
}
}
private:
void ApplyLazyAdam(float lr,
int64_t local_index,
const std::vector<float>& grad_vec,
std::vector<float>& emb_vec)
{
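    // Lazy Adam: the per-entry bias-correction factors beta1^t and beta2^t only advance when the
    // entry receives a gradient. First/second moments:
    //   m = beta1 * m + (1 - beta1) * g,   v = beta2 * v + (1 - beta2) * g^2
    //   w -= lr * (m / (1 - beta1^t)) / (sqrt(v / (1 - beta2^t)) + epsilon)
    // With adam_w, weight decay is decoupled (applied directly to the weight); otherwise it is
    // folded into the gradient.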
auto& m_vec = optimizer_states_[0][local_index];
auto& v_vec = optimizer_states_[1][local_index];
float beta1t = per_embedding_states_[0][local_index];
float beta2t = per_embedding_states_[1][local_index];
beta1t *= beta1_;
beta2t *= beta2_;
per_embedding_states_[0][local_index] = beta1t;
per_embedding_states_[1][local_index] = beta2t;
for (int i = 0; i < embedding_dim_; i++) {
float grad_value = grad_vec[i];
float emb_value = emb_vec[i];
if (adam_w_) {
emb_value -= lr * weight_decay_ * emb_value;
} else {
grad_value += weight_decay_ * emb_value;
}
float m = m_vec[i];
float v = v_vec[i];
m = beta1_ * m + (1 - beta1_) * grad_value;
v = beta2_ * v + (1 - beta2_) * grad_value * grad_value;
float const mhat = m / (1 - beta1t);
float const vhat = v / (1 - beta2t);
emb_value = emb_value - lr * mhat / (sqrtf(vhat) + epsilon_);
emb_vec[i] = emb_value;
m_vec[i] = m;
v_vec[i] = v;
}
}
void ApplyAdaGrad(float lr,
int64_t local_index,
const std::vector<float>& grad_vec,
std::vector<float>& emb_vec)
{
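    // AdaGrad: accumulate squared gradients per element,
    //   state_sum += g^2;  w -= lr * g / (sqrt(state_sum) + epsilon)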
auto& state_sum_vec = optimizer_states_[0][local_index];
for (int i = 0; i < embedding_dim_; i++) {
float grad_value = grad_vec[i];
float emb_value = emb_vec[i];
grad_value += weight_decay_ * emb_value;
float state_sum = state_sum_vec[i];
state_sum += grad_value * grad_value;
emb_value = emb_value - lr * grad_value / (sqrtf(state_sum) + epsilon_);
emb_vec[i] = emb_value;
state_sum_vec[i] = state_sum;
}
}
void ApplyRMSProp(float lr,
int64_t local_index,
const std::vector<float>& grad_vec,
std::vector<float>& emb_vec)
{
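    // RMSProp: exponential moving average of squared gradients,
    //   v = alpha * v + (1 - alpha) * g^2;  w -= lr * g / (sqrt(v) + epsilon)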
auto& v_vec = optimizer_states_[0][local_index];
for (int i = 0; i < embedding_dim_; i++) {
float grad_value = grad_vec[i];
float emb_value = emb_vec[i];
grad_value += weight_decay_ * emb_value;
auto v = v_vec[i];
v = alpha_ * v + (1 - alpha_) * grad_value * grad_value;
emb_value = emb_value - lr * grad_value / (sqrtf(v) + epsilon_);
emb_vec[i] = emb_value;
v_vec[i] = v;
}
}
void ApplySGD(float lr,
int64_t local_index,
const std::vector<float>& grad_vec,
std::vector<float>& emb_vec)
{
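    // Plain SGD with L2 weight decay folded into the gradient:  w -= lr * (g + weight_decay * w)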
for (int i = 0; i < embedding_dim_; i++) {
float grad_value = grad_vec[i];
float emb_value = emb_vec[i];
grad_value += weight_decay_ * emb_value;
emb_value -= lr * grad_value;
emb_vec[i] = emb_value;
}
}
void parse_params()
{
for (auto& optimizer_param : params_->optimizer_params) {
auto name = optimizer_param.first;
float const value = optimizer_param.second;
if (name == "weight_decay") {
weight_decay_ = value;
} else if (name == "epsilon") {
epsilon_ = value;
} else if (name == "alpha") {
alpha_ = value;
} else if (name == "beta1") {
beta1_ = value;
} else if (name == "beta2") {
beta2_ = value;
} else if (name == "adam_w") {
adam_w_ = value > 0.5;
} else {
FAIL();
}
}
}
void create_optimizer_states()
{
switch (params_->optimizer_type) {
case WHOLEMEMORY_OPT_LAZY_ADAM: {
state_count_ = 2;
per_embedding_count_ = 2;
break;
}
case WHOLEMEMORY_OPT_SGD: {
state_count_ = 0;
per_embedding_count_ = 0;
break;
}
case WHOLEMEMORY_OPT_ADAGRAD: {
state_count_ = 1;
per_embedding_count_ = 1;
break;
}
case WHOLEMEMORY_OPT_RMSPROP: {
state_count_ = 1;
per_embedding_count_ = 1;
break;
}
default: {
FAIL();
}
}
embedding_dim_ = params_->grad_description.sizes[1];
optimizer_states_.resize(state_count_);
per_embedding_states_.resize(per_embedding_count_);
for (int i = 0; i < state_count_; i++) {
optimizer_states_[i].resize(end_entry_ - start_entry_);
for (int j = 0; j < end_entry_ - start_entry_; j++) {
optimizer_states_[i][j].resize(embedding_dim_, 0.0f);
}
}
for (int i = 0; i < per_embedding_count_; i++) {
per_embedding_states_[i].resize(end_entry_ - start_entry_, 1.0f);
}
}
EmbeddingBackwardTestParams* params_;
int64_t start_entry_;
int64_t end_entry_;
float weight_decay_ = 0.0f;
float epsilon_ = 1e-8f;
float alpha_ = 0.99f;
float beta1_ = 0.9f;
float beta2_ = 0.999f;
bool adam_w_ = false;
int embedding_dim_ = 0;
int state_count_ = 0;
int per_embedding_count_ = 0;
std::vector<std::vector<std::vector<float>>> optimizer_states_;
std::vector<std::vector<float>> per_embedding_states_;
};
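// Generates per-step, per-rank indices and gradients plus a random starting embedding table,
// then replays every step through CPUOptimizer to produce the expected end embedding table.
// Generation and replay are sharded across host threads.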
void prepare_data_and_reference(
EmbeddingBackwardTestParams& params,
int world_size,
std::vector<std::vector<std::vector<int64_t>>>& step_rank_indices,
std::vector<std::vector<std::vector<std::vector<float>>>>& step_rank_grads,
std::vector<std::vector<float>>& start_embedding_table,
std::vector<std::vector<float>>& end_embedding_table)
{
step_rank_indices.resize(params.run_count);
step_rank_grads.resize(params.run_count);
for (int run = 0; run < params.run_count; run++) {
step_rank_indices[run].resize(world_size);
step_rank_grads[run].resize(world_size);
}
int cpu_count = GetProcessorCount();
int thread_count = std::max(1, cpu_count - 1); // reserve one core for other usage
int run_thread_count = std::min(thread_count, params.run_count * world_size);
MultiThreadRun(run_thread_count,
[&step_rank_indices, &step_rank_grads, ¶ms, world_size](
int thread_rank, int thread_world_size) {
for (int idx = thread_rank; idx < params.run_count * world_size;
idx += thread_world_size) {
int run = idx / world_size;
int world_rank = idx % world_size;
auto& indice = step_rank_indices[run][world_rank];
auto& grads = step_rank_grads[run][world_rank];
int rank_indice_count = params.indice_description.size;
indice.resize(rank_indice_count);
grads.resize(rank_indice_count);
wholememory_array_description_t init_indice_desc = params.indice_description;
init_indice_desc.dtype = WHOLEMEMORY_DT_INT64;
int64_t entry_count = params.embedding_description.sizes[0];
wholememory_ops::testing::host_random_init_indices(
indice.data(), init_indice_desc, entry_count);
for (int i = 0; i < rank_indice_count; i++) {
grads[i].resize(params.grad_description.sizes[1]);
wholememory_ops::testing::host_random_init_float(
grads[i].data(), params.grad_description.sizes[1], -5.0, 10);
}
}
});
start_embedding_table.resize(params.embedding_description.sizes[0]);
end_embedding_table.resize(params.embedding_description.sizes[0]);
MultiThreadRun(thread_count,
[¶ms, &start_embedding_table, &end_embedding_table](int thread_rank,
int thread_world_size) {
int64_t total_entry_count = start_embedding_table.size();
int64_t start_entry = thread_rank * total_entry_count / thread_world_size;
int64_t end_entry = (thread_rank + 1) * total_entry_count / thread_world_size;
int embedding_dim = params.grad_description.sizes[1];
for (int64_t entry = start_entry; entry < end_entry; entry++) {
start_embedding_table[entry].resize(embedding_dim);
wholememory_ops::testing::host_random_init_float(
start_embedding_table[entry].data(), embedding_dim, -10.0, 10);
end_embedding_table[entry] = start_embedding_table[entry];
}
});
MultiThreadRun(run_thread_count,
[world_size, ¶ms, &step_rank_indices, &step_rank_grads, &end_embedding_table](
int thread_rank, int thread_world_size) {
int64_t total_entry_count = end_embedding_table.size();
int64_t start_entry = thread_rank * total_entry_count / thread_world_size;
int64_t end_entry = (thread_rank + 1) * total_entry_count / thread_world_size;
CPUOptimizer cpu_optimizer(¶ms, start_entry, end_entry);
int embedding_dim = params.grad_description.sizes[1];
for (int step = 0; step <= params.run_count; step++) {
int step_id = std::min(step, params.run_count - 1);
std::vector<int64_t> indices;
std::vector<std::vector<float>> grads;
std::unordered_map<int64_t, int> indice_map;
for (int rank = 0; rank < world_size; rank++) {
auto& indices_vec = step_rank_indices[step_id][rank];
auto& grad_vec = step_rank_grads[step_id][rank];
int64_t rank_indice_count = indices_vec.size();
EXPECT_EQ(rank_indice_count, grad_vec.size());
for (int i = 0; i < rank_indice_count; i++) {
int64_t idx = indices_vec[i];
if (idx < start_entry || idx >= end_entry) continue;
auto& grad_data = grad_vec[i];
auto it = indice_map.find(idx);
if (it == indice_map.end()) {
indice_map[idx] = indices.size();
indices.push_back(idx);
grads.resize(grads.size() + 1);
grads.back() = grad_data;
} else {
int64_t array_idx = it->second;
for (int d = 0; d < embedding_dim; d++) {
grads[array_idx][d] += grad_data[d];
}
}
}
}
float lr = params.lr_;
cpu_optimizer.Apply(lr, indices, grads, end_embedding_table);
}
});
}
template <typename IndiceT>
void copy_indices(const int64_t* src_indice, IndiceT* dst_indice, int64_t indice_count)
{
for (int64_t i = 0; i < indice_count; i++) {
dst_indice[i] = src_indice[i];
}
}
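// Element-wise comparison with absolute and relative tolerance; reports at most 10 mismatches
// per row and prints the pre-update value ("old") to simplify debugging.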
static void host_expect_all_close(const float* data_ptr,
const float* ref_ptr,
const float* old_ptr,
int64_t count,
int64_t entry,
float atol = 1e-5,
float rtol = 1e-5)
{
int diff_count = 0;
for (int64_t i = 0; i < count && diff_count < 10; i++) {
float data = data_ptr[i];
float ref = ref_ptr[i];
    float aerr = std::abs(data - ref);
    if (aerr < atol) continue;
    float rerr = aerr / std::max(std::abs(data), std::abs(ref));
if (rerr < rtol) continue;
diff_count++;
EXPECT_LT(rerr, rtol) << "data[" << entry << "][" << i << "]=" << data << ", but ref is " << ref
<< ", old is " << old_ptr[i];
}
}
TEST_P(WholeMemoryEmbeddingBackwardParameterTests, EmbeddingGatherGradientApplyTest)
{
auto params = GetParam();
EXPECT_EQ(params.embedding_description.sizes[1], params.grad_description.sizes[1]);
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
if (dev_count == 1 && params.is_large_test()) {
GTEST_SKIP() << "skipping large test on single gpu";
}
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
std::vector<std::vector<std::vector<int64_t>>> step_rank_indices;
std::vector<std::vector<std::vector<std::vector<float>>>> step_rank_grads;
std::vector<std::vector<float>> start_embedding_table;
std::vector<std::vector<float>> ref_end_embedding_table;
prepare_data_and_reference(params,
dev_count,
step_rank_indices,
step_rank_grads,
start_embedding_table,
ref_end_embedding_table);
MultiProcessRun(
dev_count,
[¶ms,
&pipes,
&step_rank_indices,
&step_rank_grads,
&start_embedding_table,
&ref_end_embedding_table](int world_rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
wholememory_comm_t cache_comm = wm_comm;
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS ||
(params.use_cache &&
wholememory_communicator_support_type_location(
cache_comm, params.cache_memory_type, params.cache_memory_location) !=
WHOLEMEMORY_SUCCESS)) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
        if (world_rank == 0) GTEST_SKIP_("Skipped: unsupported memory type / location combination.");
return;
}
int64_t embedding_dim = params.embedding_description.sizes[1];
void *dev_indices = nullptr, *dev_grad_buffer = nullptr;
void *host_indices = nullptr, *host_grad_buffer = nullptr;
size_t grad_buffer_size = wholememory_get_memory_size_from_matrix(¶ms.grad_description);
size_t indices_buffer_size =
wholememory_get_memory_size_from_array(¶ms.indice_description);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_grad_buffer, grad_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_grad_buffer, grad_buffer_size), cudaSuccess);
wholememory_tensor_t indices_tensor, grad_tensor;
wholememory_tensor_description_t indices_tensor_desc, grad_tensor_desc;
wholememory_copy_array_desc_to_tensor(&indices_tensor_desc, ¶ms.indice_description);
wholememory_copy_matrix_desc_to_tensor(&grad_tensor_desc, ¶ms.grad_description);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&indices_tensor, dev_indices, &indices_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&grad_tensor, dev_grad_buffer, &grad_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_embedding_cache_policy_t cache_policy = params.get_cache_policy(cache_comm);
wholememory_embedding_t wm_embedding;
wholememory_tensor_description_t embedding_tensor_description;
wholememory_copy_matrix_desc_to_tensor(&embedding_tensor_description,
¶ms.embedding_description);
wholememory_embedding_optimizer_t optimizer;
EXPECT_EQ(wholememory_create_embedding_optimizer(&optimizer, params.optimizer_type),
WHOLEMEMORY_SUCCESS);
for (auto& param_name_value : params.optimizer_params) {
EXPECT_EQ(wholememory_optimizer_set_parameter(
optimizer, param_name_value.first.c_str(), ¶m_name_value.second),
WHOLEMEMORY_SUCCESS);
}
EXPECT_EQ(wholememory_create_embedding(&wm_embedding,
&embedding_tensor_description,
wm_comm,
params.memory_type,
params.memory_location,
optimizer,
cache_policy),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t embedding_tensor =
wholememory_embedding_get_embedding_tensor(wm_embedding);
wholememory_tensor_t local_embed_tensor;
EXPECT_EQ(wholememory_tensor_map_local_tensor(embedding_tensor, &local_embed_tensor),
WHOLEMEMORY_SUCCESS);
wholememory_handle_t embedding_handle =
wholememory_tensor_get_memory_handle(embedding_tensor);
auto entry_per_partition = wholememory_tensor_get_entry_per_partition(embedding_tensor);
int64_t total_entry_count = params.embedding_description.sizes[0];
int64_t rank_start_entry =
std::min<int64_t>(world_rank * entry_per_partition, total_entry_count);
int64_t rank_end_entry =
std::min<int64_t>((world_rank + 1) * entry_per_partition, total_entry_count);
int64_t rank_entry_count = rank_end_entry - rank_start_entry;
auto* dst_base_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embed_tensor));
size_t dst_stride = wholememory_tensor_get_tensor_description(local_embed_tensor)->strides[0];
size_t embedding_copy_size = embedding_dim * sizeof(float);
for (int64_t i = 0; i < rank_entry_count; i++) {
WM_CUDA_CHECK_NO_THROW(cudaMemcpy(dst_base_ptr + i * dst_stride,
start_embedding_table[rank_start_entry + i].data(),
embedding_copy_size,
cudaMemcpyHostToDevice));
}
EXPECT_EQ(cudaStreamSynchronize(nullptr), cudaSuccess);
EXPECT_EQ(wholememory_communicator_barrier(wm_comm), WHOLEMEMORY_SUCCESS);
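      // Replay the same schedule as the CPU reference: run_count + 1 iterations, with the last
      // step's indices and gradients applied twice (step_id is clamped to run_count - 1).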
for (int run = 0; run <= params.run_count; run++) {
int step_id = std::min(run, params.run_count - 1);
auto& rank_indices_vec = step_rank_indices[step_id][world_rank];
auto& rank_grads_vec = step_rank_grads[step_id][world_rank];
int64_t indice_count = rank_indices_vec.size();
if (params.indice_description.dtype == WHOLEMEMORY_DT_INT64) {
copy_indices(rank_indices_vec.data(), static_cast<int64_t*>(host_indices), indice_count);
} else {
copy_indices(rank_indices_vec.data(), static_cast<int*>(host_indices), indice_count);
}
float* host_grad_float_ptr = static_cast<float*>(host_grad_buffer);
size_t grad_stride = params.grad_description.stride;
size_t grad_vec_size = params.grad_description.sizes[1];
for (int64_t i = 0; i < indice_count; i++) {
memcpy(&host_grad_float_ptr[i * grad_stride],
rank_grads_vec[i].data(),
grad_vec_size * sizeof(float));
}
auto indice_dtype = params.indice_description.dtype;
EXPECT_EQ(cudaMemcpy(dev_indices,
host_indices,
indice_count * wholememory_dtype_get_element_size(indice_dtype),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(dev_grad_buffer,
host_grad_buffer,
indice_count * grad_stride * sizeof(float),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(nullptr), cudaSuccess);
wholememory_embedding_gather_gradient_apply(wm_embedding,
indices_tensor,
grad_tensor,
true,
params.lr_,
wholememory::get_default_env_func(),
reinterpret_cast<int64_t>(stream));
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(wholememory_communicator_barrier(wm_comm), WHOLEMEMORY_SUCCESS);
}
EXPECT_EQ(
wholememory_embedding_writeback_cache(wm_embedding, reinterpret_cast<int64_t>(stream)),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(wholememory_communicator_barrier(wm_comm), WHOLEMEMORY_SUCCESS);
std::vector<std::vector<float>> local_end_embedding(rank_entry_count);
for (int64_t i = 0; i < rank_entry_count; i++) {
local_end_embedding[i].resize(embedding_dim);
EXPECT_EQ(cudaMemcpy(local_end_embedding[i].data(),
dst_base_ptr + i * dst_stride,
embedding_copy_size,
cudaMemcpyDeviceToHost),
cudaSuccess);
}
EXPECT_EQ(cudaStreamSynchronize(nullptr), cudaSuccess);
for (int64_t i = 0; i < rank_entry_count; i++) {
if (::testing::Test::HasFailure()) break;
host_expect_all_close(local_end_embedding[i].data(),
ref_end_embedding_table[i + rank_start_entry].data(),
start_embedding_table[i + rank_start_entry].data(),
embedding_dim,
i);
}
EXPECT_EQ(wholememory_destroy_embedding_cache_policy(cache_policy), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(indices_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(grad_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaFreeHost(host_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_grad_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_grad_buffer), cudaSuccess);
EXPECT_EQ(wholememory_destroy_embedding(wm_embedding), WHOLEMEMORY_SUCCESS);
wholememory_destroy_embedding_optimizer(optimizer);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
}
INSTANTIATE_TEST_SUITE_P(
CachedEmbeddingGatherBackwardTest,
WholeMemoryEmbeddingBackwardParameterTests,
::testing::Values(
#if 0
EmbeddingBackwardTestParams(),
EmbeddingBackwardTestParams().set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache(),
EmbeddingBackwardTestParams().set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_run_count(10),
EmbeddingBackwardTestParams().set_run_count(10).set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_run_count(10).set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_run_count(10).set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_run_count(10).set_use_cache(),
EmbeddingBackwardTestParams().set_run_count(10).set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_run_count(10).set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_run_count(10).set_use_cache().set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache().set_indice_count(10000127),
EmbeddingBackwardTestParams().set_use_cache().set_indice_count(10000127).set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_use_cache().set_indice_count(10000127).set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_use_cache().set_indice_count(10000127).set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
#endif
EmbeddingBackwardTestParams().set_use_cache().set_grad_stride(131),
EmbeddingBackwardTestParams().set_use_cache().set_grad_stride(131).set_optimizer_type(
WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_use_cache().set_grad_stride(131).set_optimizer_type(
WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_use_cache().set_grad_stride(131).set_optimizer_type(
WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_cache_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache().set_cache_ratio(0.07).set_optimizer_type(
WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache().set_cache_ratio(0.53).set_optimizer_type(
WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams(),
EmbeddingBackwardTestParams().set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingBackwardTestParams()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache().set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(129),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(129).set_optimizer_type(
WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(129).set_optimizer_type(
WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(129).set_optimizer_type(
WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_embedding_dim(129)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM)
.set_run_count(10)
.set_optimizer_params("beta1", 0.8),
EmbeddingBackwardTestParams()
.set_use_cache()
.set_embedding_dim(129)
.set_optimizer_type(WHOLEMEMORY_OPT_LAZY_ADAM)
.set_run_count(10)
.set_optimizer_params("beta2", 0.9),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(392),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(392).set_optimizer_type(
WHOLEMEMORY_OPT_RMSPROP),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(392).set_optimizer_type(
WHOLEMEMORY_OPT_ADAGRAD),
EmbeddingBackwardTestParams().set_use_cache().set_embedding_dim(392).set_optimizer_type(
WHOLEMEMORY_OPT_LAZY_ADAM),
EmbeddingBackwardTestParams()));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory_ops/wholememory_embedding_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <wholememory/embedding.h>
#include "../wholememory/wholememory_test_utils.hpp"
#include "embedding_test_utils.hpp"
#include "wholememory/env_func_ptrs.hpp"
struct EmbeddingTestParams {
EmbeddingTestParams()
{
const int64_t kDefaultEmbeddingEntryCount = 4000001;
const int64_t kDefaultEmbeddingDim = 127;
const int64_t kDefaultGatherIndiceCount = 100005;
int64_t embedding_sizes[2] = {kDefaultEmbeddingEntryCount, kDefaultEmbeddingDim};
embedding_description = wholememory_create_matrix_desc(
&embedding_sizes[0], kDefaultEmbeddingDim, 0, WHOLEMEMORY_DT_FLOAT);
indice_description =
wholememory_create_array_desc(kDefaultGatherIndiceCount, 0, WHOLEMEMORY_DT_INT64);
int64_t output_sizes[2] = {kDefaultGatherIndiceCount, kDefaultEmbeddingDim};
output_description = wholememory_create_matrix_desc(
&output_sizes[0], kDefaultEmbeddingDim, 0, WHOLEMEMORY_DT_FLOAT);
}
bool is_large_test()
{
int64_t embedding_table_mem_size =
wholememory_get_memory_element_count_from_matrix(&embedding_description) *
wholememory_dtype_get_element_size(embedding_description.dtype);
if (embedding_table_mem_size > 2LL * 1024 * 1024 * 1024) return true;
return false;
}
EmbeddingTestParams& set_entry_count(int64_t entry_count)
{
embedding_description.sizes[0] = entry_count;
return *this;
}
EmbeddingTestParams& set_embedding_dim(int embedding_dim)
{
embedding_description.sizes[1] = embedding_dim;
output_description.sizes[1] = embedding_dim;
embedding_description.stride = embedding_dim;
if (output_description.stride < embedding_dim) output_description.stride = embedding_dim;
return *this;
}
EmbeddingTestParams& set_embedding_stride(int stride)
{
embedding_description.stride = stride;
return *this;
}
EmbeddingTestParams& set_embedding_dtype(wholememory_dtype_t dtype)
{
embedding_description.dtype = dtype;
return *this;
}
EmbeddingTestParams& set_indice_count(int indice_count)
{
indice_description.size = indice_count;
output_description.sizes[0] = indice_count;
return *this;
}
EmbeddingTestParams& set_indice_dtype(wholememory_dtype_t dtype)
{
indice_description.dtype = dtype;
return *this;
}
EmbeddingTestParams& set_output_stride(int stride)
{
output_description.stride = stride;
return *this;
}
EmbeddingTestParams& set_output_dtype(wholememory_dtype_t dtype)
{
output_description.dtype = dtype;
return *this;
}
EmbeddingTestParams& set_memory_type(wholememory_memory_type_t mt)
{
memory_type = mt;
return *this;
}
EmbeddingTestParams& set_memory_location(wholememory_memory_location_t ml)
{
memory_location = ml;
return *this;
}
EmbeddingTestParams& set_cache_memory_type(wholememory_memory_type_t cmt)
{
cache_memory_type = cmt;
return *this;
}
EmbeddingTestParams& set_cache_memory_location(wholememory_memory_location_t cml)
{
cache_memory_location = cml;
return *this;
}
EmbeddingTestParams& set_cache_ratio(float ratio)
{
cache_ratio = ratio;
return *this;
}
wholememory_embedding_cache_policy_t get_cache_policy(wholememory_comm_t comm)
{
wholememory_embedding_cache_policy_t cache_policy = nullptr;
if (cache_type == 0) return nullptr;
EXPECT_EQ(wholememory_create_embedding_cache_policy(
&cache_policy,
comm,
cache_memory_type,
cache_memory_location,
cache_type == 1 ? WHOLEMEMORY_AT_READWRITE : WHOLEMEMORY_AT_READONLY,
cache_ratio),
WHOLEMEMORY_SUCCESS);
return cache_policy;
}
EmbeddingTestParams& non_cache()
{
cache_type = 0;
return *this;
}
EmbeddingTestParams& device_cache()
{
cache_type = 1;
return *this;
}
EmbeddingTestParams& local_cache()
{
cache_type = 2;
return *this;
}
EmbeddingTestParams& set_cache_group_count(int count)
{
cache_group_count = count;
return *this;
}
wholememory_array_description_t indice_description;
wholememory_matrix_description_t embedding_description;
wholememory_matrix_description_t output_description;
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_HOST;
wholememory_memory_type_t cache_memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t cache_memory_location = WHOLEMEMORY_ML_DEVICE;
float cache_ratio = 0.2;
int cache_type = 0; // 0: no cache, 1: device cache, 2: local cache
int cache_group_count = 1;
};
class WholeMemoryEmbeddingParameterTests : public ::testing::TestWithParam<EmbeddingTestParams> {};
TEST_P(WholeMemoryEmbeddingParameterTests, EmbeddingGatherTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
if (dev_count % params.cache_group_count != 0) {
GTEST_SKIP() << "skipping test due to not enough GPUs group count=" << params.cache_group_count
<< ", but GPU count=" << dev_count;
}
if (dev_count == 1 && params.is_large_test()) {
GTEST_SKIP() << "skipping large test on single gpu";
}
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(dev_count, [¶ms, &pipes](int world_rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
wholememory_comm_t cache_comm = wm_comm;
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
GTEST_SKIP_("Skip due to not supported.");
return;
}
if (params.cache_type == 2) {
cache_comm =
create_group_communicator_by_pipes(pipes, world_rank, world_size, params.cache_group_count);
}
    if ((params.cache_type == 1 ||
         (params.cache_type == 2 && params.cache_group_count < world_size)) &&
wholememory_communicator_support_type_location(
wm_comm, params.cache_memory_type, params.cache_memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
GTEST_SKIP_("Skip due to cache memory type/location not supported.");
return;
}
void *dev_indices = nullptr, *dev_gather_buffer = nullptr, *dev_reference_buffer = nullptr;
void *host_indices = nullptr, *host_gather_buffer = nullptr, *host_reference_buffer = nullptr;
size_t gather_buffer_size = wholememory_get_memory_size_from_matrix(¶ms.output_description);
size_t indices_buffer_size = wholememory_get_memory_size_from_array(¶ms.indice_description);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_indices, indices_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_gather_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_reference_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_gather_buffer, gather_buffer_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_reference_buffer, gather_buffer_size), cudaSuccess);
wholememory_tensor_t indices_tensor, output_tensor;
wholememory_tensor_description_t indices_tensor_desc, output_tensor_desc;
wholememory_copy_array_desc_to_tensor(&indices_tensor_desc, ¶ms.indice_description);
wholememory_copy_matrix_desc_to_tensor(&output_tensor_desc, ¶ms.output_description);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&indices_tensor, dev_indices, &indices_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&output_tensor, dev_gather_buffer, &output_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_embedding_cache_policy_t cache_policy = params.get_cache_policy(cache_comm);
wholememory_embedding_t wm_embedding;
wholememory_tensor_description_t embedding_tensor_description;
wholememory_copy_matrix_desc_to_tensor(&embedding_tensor_description,
¶ms.embedding_description);
EXPECT_EQ(wholememory_create_embedding(&wm_embedding,
&embedding_tensor_description,
wm_comm,
params.memory_type,
params.memory_location,
nullptr,
cache_policy),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t embedding_tensor =
wholememory_embedding_get_embedding_tensor(wm_embedding);
wholememory_handle_t embedding_handle = wholememory_tensor_get_memory_handle(embedding_tensor);
wholememory_matrix_description_t embedding_matrix_desc;
EXPECT_TRUE(wholememory_convert_tensor_desc_to_matrix(
&embedding_matrix_desc, wholememory_tensor_get_tensor_description(embedding_tensor)));
wholememory_ops::testing::device_random_init_local_embedding_table(
embedding_handle, embedding_matrix_desc, stream);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
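    // Gather 10 rounds with freshly generated random indices; the boolean passed to
    // wholememory_embedding_gather alternates via (i % 2 == 0), so both settings of that flag
    // (presumably cache adjustment) are exercised.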
for (int i = 0; i < 10; i++) {
wholememory_ops::testing::host_random_init_indices(
host_indices, params.indice_description, params.embedding_description.sizes[0]);
EXPECT_EQ(cudaMemcpyAsync(dev_indices,
host_indices,
wholememory_get_memory_size_from_array(¶ms.indice_description),
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
EXPECT_EQ(wholememory_embedding_gather(wm_embedding,
indices_tensor,
output_tensor,
i % 2 == 0,
wholememory::get_default_env_func(),
(int64_t)stream),
WHOLEMEMORY_SUCCESS);
wholememory_ops::testing::device_get_expected_embedding(dev_reference_buffer,
params.output_description,
params.embedding_description.dtype,
dev_indices,
params.indice_description,
wholememory::get_default_env_func(),
stream);
EXPECT_EQ(cudaMemcpyAsync(host_gather_buffer,
dev_gather_buffer,
wholememory_get_memory_size_from_matrix(¶ms.output_description),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_reference_buffer,
dev_reference_buffer,
wholememory_get_memory_size_from_matrix(¶ms.output_description),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_ops::testing::host_check_embedding_same(host_gather_buffer,
params.output_description,
host_reference_buffer,
params.output_description);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
}
EXPECT_EQ(wholememory_destroy_embedding_cache_policy(cache_policy), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(indices_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(output_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaFreeHost(host_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_indices), cudaSuccess);
EXPECT_EQ(cudaFree(dev_gather_buffer), cudaSuccess);
EXPECT_EQ(cudaFree(dev_reference_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_gather_buffer), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_reference_buffer), cudaSuccess);
EXPECT_EQ(wholememory_destroy_embedding(wm_embedding), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
INSTANTIATE_TEST_SUITE_P(
CachedEmbeddingGatherTest,
WholeMemoryEmbeddingParameterTests,
::testing::Values(
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(256)
.set_cache_group_count(2)
.set_cache_ratio(0.1),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(256)
.set_cache_group_count(4)
.set_cache_ratio(0.05),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(256)
.set_cache_group_count(8)
.set_cache_ratio(0.02),
#if 1
EmbeddingTestParams().non_cache(),
EmbeddingTestParams().non_cache().set_memory_location(WHOLEMEMORY_ML_DEVICE),
EmbeddingTestParams().device_cache(),
EmbeddingTestParams().device_cache().set_cache_memory_type(WHOLEMEMORY_MT_DISTRIBUTED),
EmbeddingTestParams().local_cache(),
EmbeddingTestParams().local_cache().set_cache_memory_location(WHOLEMEMORY_ML_HOST),
EmbeddingTestParams()
.local_cache()
.set_memory_location(WHOLEMEMORY_ML_DEVICE)
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED),
EmbeddingTestParams().device_cache().set_cache_ratio(0.002),
EmbeddingTestParams().local_cache().set_cache_ratio(0.002),
EmbeddingTestParams()
.device_cache()
.set_cache_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_cache_ratio(0.002),
EmbeddingTestParams()
.local_cache()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_cache_ratio(0.002),
EmbeddingTestParams().non_cache().set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().device_cache().set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().local_cache().set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().non_cache().set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingTestParams().device_cache().set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingTestParams().local_cache().set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingTestParams().non_cache().set_embedding_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().device_cache().set_embedding_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().local_cache().set_embedding_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams()
.non_cache()
.set_memory_location(WHOLEMEMORY_ML_DEVICE)
.set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams()
.device_cache()
.set_cache_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams()
.local_cache()
.set_memory_type(WHOLEMEMORY_MT_DISTRIBUTED)
.set_output_dtype(WHOLEMEMORY_DT_HALF),
EmbeddingTestParams().non_cache().set_embedding_dim(131),
EmbeddingTestParams().device_cache().set_embedding_dim(131),
EmbeddingTestParams().local_cache().set_embedding_dim(131),
EmbeddingTestParams().non_cache().set_embedding_dim(11).set_output_stride(11),
EmbeddingTestParams().device_cache().set_embedding_dim(11).set_output_stride(11),
EmbeddingTestParams().local_cache().set_embedding_dim(11).set_output_stride(11),
EmbeddingTestParams().non_cache().set_embedding_dim(1157).set_entry_count(300000),
EmbeddingTestParams().device_cache().set_embedding_dim(1157).set_entry_count(300000),
EmbeddingTestParams().local_cache().set_embedding_dim(1157).set_entry_count(300000),
EmbeddingTestParams().non_cache().set_output_stride(131),
EmbeddingTestParams().device_cache().set_output_stride(131),
EmbeddingTestParams().local_cache().set_output_stride(131),
// large tests
EmbeddingTestParams()
.non_cache()
.set_entry_count((1LL << 32LL) + 127)
.set_embedding_dim(3)
.set_embedding_stride(3),
EmbeddingTestParams()
.device_cache()
.set_entry_count((1LL << 32LL) + 127)
.set_embedding_dim(3)
.set_embedding_stride(3),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 32LL) + 127)
.set_embedding_dim(3)
.set_embedding_stride(3),
EmbeddingTestParams()
.non_cache()
.set_entry_count((1LL << 31LL) - 127)
.set_embedding_dim(5)
.set_embedding_stride(5)
.set_indice_dtype(WHOLEMEMORY_DT_INT),
EmbeddingTestParams()
.device_cache()
.set_entry_count((1LL << 31LL) - 127)
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_embedding_dim(5)
.set_embedding_stride(5),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 31LL) - 127)
.set_indice_dtype(WHOLEMEMORY_DT_INT)
.set_embedding_dim(5)
.set_embedding_stride(5),
EmbeddingTestParams().non_cache().set_entry_count((1LL << 20LL) + 131).set_embedding_dim(1024),
EmbeddingTestParams()
.device_cache()
.set_entry_count((1LL << 20LL) + 131)
.set_embedding_dim(1024),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 20LL) + 131)
.set_embedding_dim(1024),
EmbeddingTestParams().non_cache().set_entry_count((1LL << 23LL) + 127).set_embedding_dim(1025),
EmbeddingTestParams()
.device_cache()
.set_entry_count((1LL << 23LL) + 127)
.set_embedding_dim(1025),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 23LL) + 127)
.set_embedding_dim(1025),
EmbeddingTestParams()
.non_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(11)
.set_embedding_stride(12),
EmbeddingTestParams()
.device_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(11)
.set_embedding_stride(12),
EmbeddingTestParams()
.local_cache()
.set_entry_count((1LL << 22LL) + 131)
.set_embedding_dim(11)
.set_embedding_stride(12),
#endif
EmbeddingTestParams()));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory/wholememory_tensor_tests.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuda_runtime_api.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
#include "parallel_utils.hpp"
struct MatrixTestParam {
MatrixTestParam& set_row(int64_t r)
{
row = r;
return *this;
}
MatrixTestParam& set_col(int64_t c)
{
col = c;
return *this;
}
MatrixTestParam& set_dtype(wholememory_dtype_t dt)
{
dtype = dt;
return *this;
}
int64_t row = 256LL * 128LL;
int64_t col = 256LL;
wholememory_dtype_t dtype = WHOLEMEMORY_DT_FLOAT;
};
class WholeMemoryMatrixTest : public ::testing::TestWithParam<MatrixTestParam> {};
TEST(WholeMemoryMatrixTest, SubTensorTest)
{
MatrixTestParam params;
params.set_row(256LL * 128LL).set_col(256LL).set_dtype(WHOLEMEMORY_DT_INT);
MultiProcessRun(1, [¶ms](int world_rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(0), cudaSuccess);
wholememory_unique_id_t unique_id;
wholememory_comm_t wm_comm;
EXPECT_EQ(wholememory_create_unique_id(&unique_id), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_create_communicator(&wm_comm, unique_id, world_rank, world_size),
WHOLEMEMORY_SUCCESS);
int64_t sizes[2] = {params.row, params.col};
wholememory_matrix_description_t mat_desc =
wholememory_create_matrix_desc(sizes, params.col, 0, params.dtype);
wholememory_tensor_description_t tensor_desc;
wholememory_copy_matrix_desc_to_tensor(&tensor_desc, &mat_desc);
wholememory_tensor_t wholememory_tensor;
EXPECT_EQ(
wholememory_create_tensor(
&wholememory_tensor, &tensor_desc, wm_comm, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_HOST),
WHOLEMEMORY_SUCCESS);
wholememory_handle_t wm_handle = wholememory_tensor_get_memory_handle(wholememory_tensor);
int* ptr = nullptr;
EXPECT_EQ(wholememory_get_global_pointer((void**)&ptr, wm_handle), WHOLEMEMORY_SUCCESS);
for (int64_t i = 0; i < params.row * params.col; i++) {
ptr[i] = i;
}
wholememory_tensor_t wholememory_sub_tensor_0, wholememory_sub_tensor_1;
wholememory_tensor_description_t sub_desc_0, sub_desc_1;
int64_t starts_0[2] = {1, 10};
int64_t ends_0[2] = {-1, 100};
int64_t starts_1[2] = {2, -1};
int64_t ends_1[2] = {10000, 80};
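    // Note: -1 in starts/ends means "use the default", i.e. the beginning of the dimension for
    // starts and the full extent for ends (verified by the size/offset checks below).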
EXPECT_EQ(wholememory_tensor_get_subtensor(
wholememory_tensor, starts_0, ends_0, &wholememory_sub_tensor_0),
WHOLEMEMORY_SUCCESS);
sub_desc_0 = *wholememory_tensor_get_tensor_description(wholememory_sub_tensor_0);
EXPECT_EQ(sub_desc_0.dim, 2);
EXPECT_EQ(sub_desc_0.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(sub_desc_0.storage_offset, params.col * 1 + 10);
EXPECT_EQ(sub_desc_0.sizes[0], params.row - 1);
EXPECT_EQ(sub_desc_0.sizes[1], 90);
EXPECT_EQ(sub_desc_0.strides[0], 256);
EXPECT_EQ(sub_desc_0.strides[1], 1);
EXPECT_EQ(wholememory_tensor_get_subtensor(
wholememory_sub_tensor_0, starts_1, ends_1, &wholememory_sub_tensor_1),
WHOLEMEMORY_SUCCESS);
sub_desc_1 = *wholememory_tensor_get_tensor_description(wholememory_sub_tensor_1);
EXPECT_EQ(sub_desc_1.dim, 2);
EXPECT_EQ(sub_desc_1.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(sub_desc_1.storage_offset, params.col * 3 + 10);
EXPECT_EQ(sub_desc_1.sizes[0], 10000 - 2);
EXPECT_EQ(sub_desc_1.sizes[1], 80);
EXPECT_EQ(sub_desc_1.strides[0], 256);
EXPECT_EQ(sub_desc_1.strides[1], 1);
EXPECT_EQ(wholememory_destroy_tensor(wholememory_sub_tensor_0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_destroy_tensor(wholememory_sub_tensor_1), WHOLEMEMORY_SUCCESS);
for (int64_t i = 0; i < params.row * params.col; i++) {
EXPECT_EQ(ptr[i], i);
}
EXPECT_EQ(wholememory_destroy_tensor(wholememory_tensor), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory/wholememory_handle_tests.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/initialize.hpp"
#include "wholememory/memory_handle.hpp"
#include "wholememory_test_utils.hpp"
class WholeMemoryHandleCreateDestroyParameterTests
: public ::testing::TestWithParam<
std::tuple<size_t, wholememory_memory_type_t, wholememory_memory_location_t, size_t>> {};
TEST_P(WholeMemoryHandleCreateDestroyParameterTests, CreateDestroyTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
WHOLEMEMORY_CHECK(dev_count >= 1);
int nproc = dev_count;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(
nproc,
[&pipes, ¶ms](int rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, std::get<1>(params), std::get<2>(params)) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
        if (rank == 0) GTEST_SKIP_("Skip due to unsupported memory type/location.");
return;
}
wholememory_handle_t handle1;
EXPECT_EQ(wholememory::create_wholememory(&handle1,
std::get<0>(params),
wm_comm,
std::get<1>(params),
std::get<2>(params),
                                                std::get<3>(params)),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory::destroy_wholememory(handle1), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
ClosePipes(&pipes);
}
INSTANTIATE_TEST_SUITE_P(
WholeMemoryHandleTests,
WholeMemoryHandleCreateDestroyParameterTests,
::testing::Values(
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_DEVICE, 128UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_HOST, 128UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE, 128UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_HOST, 128UL),
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_DEVICE, 128UL),
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_HOST, 128UL),
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_DEVICE, 63UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_HOST, 63UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE, 63UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_HOST, 63UL),
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_DEVICE, 63UL),
std::make_tuple(1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_HOST, 63UL),
std::make_tuple(
1024UL * 1024UL * 512UL, WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_HOST, 128UL)));
class WholeMemoryHandleMultiCreateParameterTests
: public ::testing::TestWithParam<
std::tuple<wholememory_memory_type_t, wholememory_memory_location_t>> {};
TEST_P(WholeMemoryHandleMultiCreateParameterTests, CreateDestroyTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
WHOLEMEMORY_CHECK(dev_count >= 1);
int nproc = dev_count;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(
nproc,
[&pipes, ¶ms](int rank, int world_size) {
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, std::get<0>(params), std::get<1>(params)) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
        if (rank == 0) GTEST_SKIP_("Skip due to unsupported memory type/location.");
return;
}
size_t total_size = 1024UL * 1024UL * 32;
size_t granularity = 128;
wholememory_handle_t handle1, handle2, handle3, handle4, handle5;
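      // Handle ids are expected to be allocated from the smallest free id, so destroying a handle
      // makes its id available for the next creation (checked via handle_id below).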
EXPECT_EQ(
wholememory::create_wholememory(
&handle1, total_size, wm_comm, std::get<0>(params), std::get<1>(params), granularity),
WHOLEMEMORY_SUCCESS);
// handle1: 0
EXPECT_EQ(handle1->handle_id, 0);
EXPECT_EQ(
wholememory::create_wholememory(
&handle2, total_size, wm_comm, std::get<0>(params), std::get<1>(params), granularity),
WHOLEMEMORY_SUCCESS);
// handle1: 0, handle2: 1
EXPECT_EQ(handle2->handle_id, 1);
EXPECT_EQ(
wholememory::create_wholememory(
&handle3, total_size, wm_comm, std::get<0>(params), std::get<1>(params), granularity),
WHOLEMEMORY_SUCCESS);
// handle1: 0, handle2: 1, handle3: 2
EXPECT_EQ(handle3->handle_id, 2);
EXPECT_EQ(wm_comm->wholememory_map.size(), 3);
EXPECT_EQ(wholememory::destroy_wholememory(handle2), WHOLEMEMORY_SUCCESS);
// handle1: 0, handle3: 2
EXPECT_EQ(wm_comm->wholememory_map.size(), 2);
EXPECT_EQ(
wholememory::create_wholememory(
&handle4, total_size, wm_comm, std::get<0>(params), std::get<1>(params), granularity),
WHOLEMEMORY_SUCCESS);
// handle1: 0, handle4: 1, handle3: 2
EXPECT_EQ(handle4->handle_id, 1);
EXPECT_EQ(wholememory::destroy_wholememory(handle1), WHOLEMEMORY_SUCCESS);
// handle4: 1, handle3: 2
EXPECT_EQ(wm_comm->wholememory_map.size(), 2);
EXPECT_EQ(wholememory::destroy_wholememory(handle3), WHOLEMEMORY_SUCCESS);
// handle4: 1
EXPECT_EQ(wm_comm->wholememory_map.size(), 1);
EXPECT_EQ(
wholememory::create_wholememory(
&handle5, total_size, wm_comm, std::get<0>(params), std::get<1>(params), granularity),
WHOLEMEMORY_SUCCESS);
// handle5: 0, handle4: 1
EXPECT_EQ(handle5->handle_id, 0);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
ClosePipes(&pipes);
}
#if 1
INSTANTIATE_TEST_SUITE_P(
WholeMemoryHandleTests,
WholeMemoryHandleMultiCreateParameterTests,
::testing::Values(std::make_tuple(WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_HOST),
std::make_tuple(WHOLEMEMORY_MT_CONTINUOUS, WHOLEMEMORY_ML_DEVICE),
std::make_tuple(WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_HOST),
std::make_tuple(WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE),
std::make_tuple(WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_HOST),
std::make_tuple(WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_DEVICE)));
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory/wholememory_comm_tests.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory_test_utils.hpp"
TEST(WholeMemoryCommTest, SimpleCreateDestroyCommunicator)
{
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
WHOLEMEMORY_CHECK(dev_count >= 1);
int nproc = dev_count;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(nproc, [&pipes](int rank, int world_size) {
EXPECT_EQ(cudaSetDevice(rank), cudaSuccess);
wholememory_comm_t wm_comm1 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm1->comm_id, 0);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
TEST(WholeMemoryCommTest, CommunicatorFunctions)
{
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
WHOLEMEMORY_CHECK(dev_count >= 1);
int nproc = dev_count;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(nproc, [&pipes](int rank, int world_size) {
EXPECT_EQ(cudaSetDevice(rank), cudaSuccess);
wholememory_comm_t wm_comm1 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm1->comm_id, 0);
int comm_rank = -1;
EXPECT_EQ(wholememory::communicator_get_rank(&comm_rank, wm_comm1), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(comm_rank, rank);
int comm_size = 0;
EXPECT_EQ(wholememory::communicator_get_size(&comm_size, wm_comm1), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(comm_size, world_size);
EXPECT_EQ(wholememory::is_intranode_communicator(wm_comm1), true);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
TEST(WholeMemoryCommTest, MultipleCreateDestroyCommunicator)
{
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
WHOLEMEMORY_CHECK(dev_count >= 1);
int nproc = dev_count;
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
MultiProcessRun(nproc, [&pipes](int rank, int world_size) {
EXPECT_EQ(cudaSetDevice(rank), cudaSuccess);
wholememory_comm_t wm_comm1 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm1->comm_id, 0);
wholememory_comm_t wm_comm2 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm2->comm_id, 1);
EXPECT_EQ(wholememory::destroy_communicator(wm_comm1), WHOLEMEMORY_SUCCESS);
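    // comm_id 0 is released by destroying wm_comm1 and should be reused by the next communicator.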
wholememory_comm_t wm_comm3 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm3->comm_id, 0);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
wholememory_comm_t wm_comm4 = create_communicator_by_pipes(pipes, rank, world_size);
EXPECT_EQ(wm_comm4->comm_id, 0);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
});
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholememory/wholememory_test_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <functional>
#include <thread>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
wholememory_comm_t create_communicator_by_pipes(const std::vector<std::array<int, 2>>& pipes,
int rank,
int world_size)
{
wholememory_unique_id_t unique_id;
if (rank == 0) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory::create_unique_id(&unique_id) == WHOLEMEMORY_SUCCESS);
}
PipeBroadcast(rank, world_size, 0, pipes, &unique_id);
wholememory_comm_t wm_comm;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory::create_communicator(&wm_comm, unique_id, rank, world_size) == WHOLEMEMORY_SUCCESS);
return wm_comm;
}
wholememory_comm_t create_group_communicator_by_pipes(const std::vector<std::array<int, 2>>& pipes,
int rank,
int world_size,
int group_count)
{
WHOLEMEMORY_CHECK_NOTHROW(world_size % group_count == 0);
int group_size = world_size / group_count;
int group_rank = rank % group_size;
wholememory_unique_id_t unique_id;
if (group_rank == 0) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory::create_unique_id(&unique_id) == WHOLEMEMORY_SUCCESS);
}
wholememory_unique_id_t comm_unique_id;
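  // Broadcast each group's unique id from that group's first rank; every rank keeps only the id
  // that belongs to its own group.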
for (int g = 0; g < group_count; g++) {
if (g * group_size == rank) comm_unique_id = unique_id;
PipeBroadcast(rank, world_size, g * group_size, pipes, &comm_unique_id);
if (rank / group_size == g) unique_id = comm_unique_id;
}
wholememory_comm_t wm_comm;
WHOLEMEMORY_CHECK_NOTHROW(wholememory::create_communicator(
&wm_comm, unique_id, group_rank, group_size) == WHOLEMEMORY_SUCCESS);
return wm_comm;
}
wholememory_comm_t create_communicator_by_socket(SideBandCommunicator* side_band_communicator,
int rank,
int world_size)
{
wholememory_unique_id_t unique_id;
if (rank == 0) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory::create_unique_id(&unique_id) == WHOLEMEMORY_SUCCESS);
}
SideBandBroadcast(side_band_communicator, &unique_id, sizeof(wholememory_unique_id_t), 0);
wholememory_comm_t wm_comm;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory::create_communicator(&wm_comm, unique_id, rank, world_size) == WHOLEMEMORY_SUCCESS);
return wm_comm;
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/csr_add_self_loop_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "wholememory/env_func_ptrs.hpp"
#include <wholememory/graph_op.h>
#include <wholememory/tensor_description.h>
#include "../wholegraph_ops/graph_sampling_test_utils.hpp"
#include "csr_add_self_loop_utils.hpp"
#include "error.hpp"
typedef struct CsrAddSelfLoopTestParam {
CsrAddSelfLoopTestParam& set_graph_row_num(int new_graph_row_num)
{
graph_row_num = new_graph_row_num;
return *this;
}
CsrAddSelfLoopTestParam& set_graph_col_num(int new_graph_col_num)
{
graph_col_num = new_graph_col_num;
return *this;
}
CsrAddSelfLoopTestParam& set_graph_edge_num(int new_graph_edge_num)
{
graph_edge_num = new_graph_edge_num;
return *this;
}
wholememory_array_description_t get_csr_row_ptr_array_desc() const
{
return wholememory_create_array_desc(graph_row_num + 1, 0, WHOLEMEMORY_DT_INT);
}
wholememory_array_description_t get_csr_col_ptr_array_desc() const
{
return wholememory_create_array_desc(graph_edge_num, 0, WHOLEMEMORY_DT_INT);
}
int get_graph_row_num() const { return graph_row_num; }
int get_graph_col_num() const { return graph_col_num; }
int get_graph_edge_num() const { return graph_edge_num; }
int graph_row_num = 3;
int graph_col_num = 5;
int graph_edge_num = 9;
} CsrAddSelfLoopTestParam;
class GraphCsrAddSelfLoopParameterTests : public ::testing::TestWithParam<CsrAddSelfLoopTestParam> {
};
TEST_P(GraphCsrAddSelfLoopParameterTests, CsrAddSelfLoopParameterTest)
{
auto params = GetParam();
int dev_count;
EXPECT_EQ(cudaGetDeviceCount(&dev_count), cudaSuccess);
EXPECT_GE(dev_count, 1);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
auto graph_row_num = params.get_graph_row_num();
auto graph_col_num = params.get_graph_col_num();
auto graph_edge_num = params.get_graph_edge_num();
auto csr_row_ptr_array_desc = params.get_csr_row_ptr_array_desc();
auto csr_col_ptr_array_desc = params.get_csr_col_ptr_array_desc();
void* host_csr_row_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc));
void* host_csr_col_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&csr_col_ptr_array_desc));
graph_ops::testing::gen_local_csr_graph(graph_row_num,
graph_col_num,
graph_edge_num,
host_csr_row_ptr,
csr_row_ptr_array_desc,
host_csr_col_ptr,
csr_col_ptr_array_desc);
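  // Adding a self loop appends exactly one edge per row, so the output CSR holds
  // graph_edge_num + graph_row_num edges.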
int output_edge_num = graph_edge_num + graph_row_num;
auto output_csr_col_ptr_array_desc =
wholememory_create_array_desc(output_edge_num, 0, WHOLEMEMORY_DT_INT);
void* host_output_csr_row_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc));
void* host_output_csr_col_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&output_csr_col_ptr_array_desc));
void* host_ref_output_csr_row_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc));
void* host_ref_output_csr_col_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&output_csr_col_ptr_array_desc));
void *dev_csr_row_ptr, *dev_csr_col_ptr, *dev_output_csr_row_ptr, *dev_output_csr_col_ptr;
EXPECT_EQ(
cudaMalloc(&dev_csr_row_ptr, wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc)),
cudaSuccess);
EXPECT_EQ(
cudaMalloc(&dev_csr_col_ptr, wholememory_get_memory_size_from_array(&csr_col_ptr_array_desc)),
cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_output_csr_row_ptr,
wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc)),
cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_output_csr_col_ptr,
wholememory_get_memory_size_from_array(&output_csr_col_ptr_array_desc)),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(dev_csr_row_ptr,
host_csr_row_ptr,
wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc),
cudaMemcpyHostToDevice),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(dev_csr_col_ptr,
host_csr_col_ptr,
wholememory_get_memory_size_from_array(&csr_col_ptr_array_desc),
cudaMemcpyHostToDevice),
cudaSuccess);
wholememory_tensor_description_t csr_row_ptr_tensor_desc, csr_col_ptr_tensor_desc,
output_csr_row_ptr_tensor_desc, output_csr_col_ptr_tensor_desc;
wholememory_tensor_t csr_row_ptr_tensor, csr_col_ptr_tensor, output_csr_row_ptr_tensor,
output_csr_col_ptr_tensor;
wholememory_copy_array_desc_to_tensor(&csr_row_ptr_tensor_desc, &csr_row_ptr_array_desc);
wholememory_copy_array_desc_to_tensor(&csr_col_ptr_tensor_desc, &csr_col_ptr_array_desc);
wholememory_copy_array_desc_to_tensor(&output_csr_row_ptr_tensor_desc, &csr_row_ptr_array_desc);
wholememory_copy_array_desc_to_tensor(&output_csr_col_ptr_tensor_desc,
&output_csr_col_ptr_array_desc);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&csr_row_ptr_tensor, dev_csr_row_ptr, &csr_row_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&csr_col_ptr_tensor, dev_csr_col_ptr, &csr_col_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&output_csr_row_ptr_tensor, dev_output_csr_row_ptr, &output_csr_row_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&output_csr_col_ptr_tensor, dev_output_csr_col_ptr, &output_csr_col_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(csr_add_self_loop(csr_row_ptr_tensor,
csr_col_ptr_tensor,
output_csr_row_ptr_tensor,
output_csr_col_ptr_tensor,
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_output_csr_row_ptr,
dev_output_csr_row_ptr,
wholememory_get_memory_size_from_array(&csr_row_ptr_array_desc),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_output_csr_col_ptr,
dev_output_csr_col_ptr,
wholememory_get_memory_size_from_array(&output_csr_col_ptr_array_desc),
cudaMemcpyDeviceToHost),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
graph_ops::testing::host_csr_add_self_loop(host_csr_row_ptr,
csr_row_ptr_array_desc,
host_csr_col_ptr,
csr_col_ptr_array_desc,
host_ref_output_csr_row_ptr,
csr_row_ptr_array_desc,
host_ref_output_csr_col_ptr,
output_csr_col_ptr_array_desc);
wholegraph_ops::testing::host_check_two_array_same(host_output_csr_row_ptr,
csr_row_ptr_array_desc,
host_ref_output_csr_row_ptr,
csr_row_ptr_array_desc);
wholegraph_ops::testing::host_check_two_array_same(host_output_csr_col_ptr,
output_csr_col_ptr_array_desc,
host_ref_output_csr_col_ptr,
output_csr_col_ptr_array_desc);
EXPECT_EQ(cudaFree(dev_csr_row_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_csr_col_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_output_csr_row_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_output_csr_col_ptr), cudaSuccess);
if (host_csr_row_ptr != nullptr) free(host_csr_row_ptr);
if (host_csr_col_ptr != nullptr) free(host_csr_col_ptr);
if (host_output_csr_row_ptr != nullptr) free(host_output_csr_row_ptr);
if (host_output_csr_col_ptr != nullptr) free(host_output_csr_col_ptr);
if (host_ref_output_csr_row_ptr != nullptr) free(host_ref_output_csr_row_ptr);
if (host_ref_output_csr_col_ptr != nullptr) free(host_ref_output_csr_col_ptr);
EXPECT_EQ(cudaStreamDestroy(stream), cudaSuccess);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
}
INSTANTIATE_TEST_SUITE_P(CsrAddSelfLoopOpTests,
GraphCsrAddSelfLoopParameterTests,
::testing::Values(CsrAddSelfLoopTestParam()
.set_graph_row_num(1357)
.set_graph_col_num(2589)
.set_graph_edge_num(19087)));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/append_unique_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <cstdio>
#include <gtest/gtest.h>
#include "../wholegraph_ops/graph_sampling_test_utils.hpp"
#include "../wholememory/wholememory_test_utils.hpp"
#include "append_unique_test_utils.hpp"
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "wholememory/initialize.hpp"
#include <wholememory/graph_op.h>
#include <wholememory/tensor_description.h>
typedef struct GraphAppendUniqueTestParam {
GraphAppendUniqueTestParam& set_target_node_count(int new_target_node_count)
{
target_node_count = new_target_node_count;
return *this;
}
GraphAppendUniqueTestParam& set_neighbor_node_count(int new_neighbor_node_count)
{
neighbor_node_count = new_neighbor_node_count;
return *this;
}
GraphAppendUniqueTestParam& set_target_dtype(wholememory_dtype_t new_target_node_dtype)
{
target_node_dtype = new_target_node_dtype;
neighbor_node_dtype = new_target_node_dtype;
return *this;
}
wholememory_array_description_t get_target_node_desc() const
{
return wholememory_create_array_desc(target_node_count, 0, target_node_dtype);
}
wholememory_array_description_t get_neighbor_node_desc() const
{
return wholememory_create_array_desc(neighbor_node_count, 0, neighbor_node_dtype);
}
int64_t get_target_node_count() const { return target_node_count; }
int64_t get_neighbor_node_count() const { return neighbor_node_count; }
wholememory_dtype_t target_node_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t neighbor_node_dtype = target_node_dtype;
int64_t target_node_count = 10;
int64_t neighbor_node_count = 100;
} GraphAppendUniqueTestParam;
class GraphAppendUniqueParameterTests
: public ::testing::TestWithParam<GraphAppendUniqueTestParam> {};
TEST_P(GraphAppendUniqueParameterTests, AppendUniqueTest)
{
auto params = GetParam();
int dev_count;
EXPECT_EQ(cudaGetDeviceCount(&dev_count), cudaSuccess);
EXPECT_GE(dev_count, 1);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
auto target_node_count = params.get_target_node_count();
auto neighbor_node_count = params.get_neighbor_node_count();
auto target_node_desc = params.get_target_node_desc();
auto neighbor_node_desc = params.get_neighbor_node_desc();
size_t target_node_size = wholememory_get_memory_size_from_array(&target_node_desc);
size_t neighbor_node_size = wholememory_get_memory_size_from_array(&neighbor_node_desc);
void *host_target_nodes_ptr = nullptr, *host_neighbor_nodes_ptr = nullptr;
void *dev_target_nodes_ptr = nullptr, *dev_neighbor_nodes_ptr = nullptr;
void *host_output_unique_nodes_ptr = nullptr, *ref_host_output_unique_nodes_ptr = nullptr;
int *host_output_neighbor_raw_to_unique_mapping_ptr = nullptr,
*ref_host_output_neighbor_raw_to_unique_mapping_ptr = nullptr;
int* dev_output_neighbor_raw_to_unique_mapping_ptr = nullptr;
wholememory_array_description_t neighbor_raw_to_unique_mapping_desc =
wholememory_create_array_desc(neighbor_node_count, 0, WHOLEMEMORY_DT_INT);
EXPECT_EQ(cudaMallocHost(&host_target_nodes_ptr, target_node_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_neighbor_nodes_ptr, neighbor_node_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_target_nodes_ptr, target_node_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_neighbor_nodes_ptr, neighbor_node_size), cudaSuccess);
EXPECT_EQ(
cudaMalloc(&dev_output_neighbor_raw_to_unique_mapping_ptr, neighbor_node_count * sizeof(int)),
cudaSuccess);
int64_t total_node_count = neighbor_node_count + target_node_count;
graph_ops::testing::gen_node_ids(host_target_nodes_ptr, target_node_desc, total_node_count, true);
graph_ops::testing::gen_node_ids(
host_neighbor_nodes_ptr, neighbor_node_desc, total_node_count, false);
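  // Target ids are generated without duplicates (unique=true); neighbor ids may repeat and may
  // overlap the target ids (unique=false).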
EXPECT_EQ(cudaMemcpyAsync(dev_target_nodes_ptr,
host_target_nodes_ptr,
target_node_size,
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(dev_neighbor_nodes_ptr,
host_neighbor_nodes_ptr,
neighbor_node_size,
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
wholememory_tensor_t target_node_tensor, neighbor_node_tensor,
output_neighbor_raw_to_unique_mapping_tensor;
wholememory_tensor_description_t target_node_tensor_desc, neighbor_node_tensor_desc,
output_neighbor_raw_to_unique_mapping_tensor_desc;
wholememory_copy_array_desc_to_tensor(&target_node_tensor_desc, &target_node_desc);
wholememory_copy_array_desc_to_tensor(&neighbor_node_tensor_desc, &neighbor_node_desc);
wholememory_copy_array_desc_to_tensor(&output_neighbor_raw_to_unique_mapping_tensor_desc,
&neighbor_raw_to_unique_mapping_desc);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&target_node_tensor, dev_target_nodes_ptr, &target_node_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
&neighbor_node_tensor, dev_neighbor_nodes_ptr, &neighbor_node_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(
wholememory_make_tensor_from_pointer(&output_neighbor_raw_to_unique_mapping_tensor,
dev_output_neighbor_raw_to_unique_mapping_ptr,
&output_neighbor_raw_to_unique_mapping_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_env_func_t* default_env_func = wholememory::get_default_env_func();
wholememory::default_memory_context_t output_unique_node_memory_ctx;
EXPECT_EQ(graph_append_unique(target_node_tensor,
neighbor_node_tensor,
&output_unique_node_memory_ctx,
output_neighbor_raw_to_unique_mapping_tensor,
default_env_func,
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
int total_unique_count = output_unique_node_memory_ctx.desc.sizes[0];
host_output_unique_nodes_ptr =
malloc(total_unique_count * wholememory_dtype_get_element_size(target_node_desc.dtype));
host_output_neighbor_raw_to_unique_mapping_ptr = (int*)malloc(neighbor_node_count * sizeof(int));
EXPECT_EQ(
cudaMemcpyAsync(host_output_unique_nodes_ptr,
output_unique_node_memory_ctx.ptr,
total_unique_count * wholememory_dtype_get_element_size(target_node_desc.dtype),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_output_neighbor_raw_to_unique_mapping_ptr,
dev_output_neighbor_raw_to_unique_mapping_ptr,
neighbor_node_count * sizeof(int),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
int ref_total_unique_node_count;
graph_ops::testing::host_append_unique(host_target_nodes_ptr,
target_node_desc,
host_neighbor_nodes_ptr,
neighbor_node_desc,
&ref_total_unique_node_count,
&ref_host_output_unique_nodes_ptr);
EXPECT_EQ(total_unique_count, ref_total_unique_node_count);
graph_ops::testing::host_gen_append_unique_neighbor_raw_to_unique(
host_output_unique_nodes_ptr,
wholememory_create_array_desc(total_unique_count, 0, target_node_desc.dtype),
host_neighbor_nodes_ptr,
neighbor_node_desc,
(void**)&ref_host_output_neighbor_raw_to_unique_mapping_ptr,
neighbor_raw_to_unique_mapping_desc);
if (target_node_desc.dtype == WHOLEMEMORY_DT_INT) {
std::sort(static_cast<int*>(host_output_unique_nodes_ptr) + target_node_count,
static_cast<int*>(host_output_unique_nodes_ptr) + total_unique_count);
std::sort(static_cast<int*>(ref_host_output_unique_nodes_ptr) + target_node_count,
static_cast<int*>(ref_host_output_unique_nodes_ptr) + total_unique_count);
} else if (target_node_desc.dtype == WHOLEMEMORY_DT_INT64) {
std::sort(static_cast<int64_t*>(host_output_unique_nodes_ptr) + target_node_count,
static_cast<int64_t*>(host_output_unique_nodes_ptr) + total_unique_count);
std::sort(static_cast<int64_t*>(ref_host_output_unique_nodes_ptr) + target_node_count,
static_cast<int64_t*>(ref_host_output_unique_nodes_ptr) + total_unique_count);
}
wholegraph_ops::testing::host_check_two_array_same(
host_output_unique_nodes_ptr,
wholememory_create_array_desc(total_unique_count, 0, target_node_desc.dtype),
ref_host_output_unique_nodes_ptr,
wholememory_create_array_desc(ref_total_unique_node_count, 0, target_node_desc.dtype));
wholegraph_ops::testing::host_check_two_array_same(
host_output_neighbor_raw_to_unique_mapping_ptr,
neighbor_raw_to_unique_mapping_desc,
ref_host_output_neighbor_raw_to_unique_mapping_ptr,
neighbor_raw_to_unique_mapping_desc);
(default_env_func->output_fns).free_fn(&output_unique_node_memory_ctx, nullptr);
if (host_output_unique_nodes_ptr != nullptr) { free(host_output_unique_nodes_ptr); }
if (host_output_neighbor_raw_to_unique_mapping_ptr != nullptr) {
free(host_output_neighbor_raw_to_unique_mapping_ptr);
}
if (ref_host_output_unique_nodes_ptr != nullptr) { free(ref_host_output_unique_nodes_ptr); }
if (ref_host_output_neighbor_raw_to_unique_mapping_ptr != nullptr) {
free(ref_host_output_neighbor_raw_to_unique_mapping_ptr);
}
EXPECT_EQ(cudaFreeHost(host_target_nodes_ptr), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_neighbor_nodes_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_target_nodes_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_neighbor_nodes_ptr), cudaSuccess);
EXPECT_EQ(cudaFree(dev_output_neighbor_raw_to_unique_mapping_ptr), cudaSuccess);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
}
INSTANTIATE_TEST_SUITE_P(GraphAppendUniqueOpTests,
GraphAppendUniqueParameterTests,
::testing::Values(GraphAppendUniqueTestParam()
.set_target_node_count(3)
.set_neighbor_node_count(10),
GraphAppendUniqueTestParam()
.set_target_node_count(53)
.set_neighbor_node_count(123)
.set_target_dtype(WHOLEMEMORY_DT_INT),
GraphAppendUniqueTestParam()
.set_target_node_count(57)
.set_neighbor_node_count(1235)
.set_target_dtype(WHOLEMEMORY_DT_INT64)));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/append_unique_test_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
namespace graph_ops {
namespace testing {
void gen_node_ids(void* host_target_nodes_ptr,
wholememory_array_description_t node_desc,
int64_t range,
bool unique);
void host_append_unique(void* target_nodes_ptr,
wholememory_array_description_t target_nodes_desc,
void* neighbor_nodes_ptr,
wholememory_array_description_t neighbor_nodes_desc,
int* host_total_unique_count,
void** host_output_unique_nodes_ptr);
void host_gen_append_unique_neighbor_raw_to_unique(
void* host_output_unique_nodes_ptr,
wholememory_array_description_t output_unique_nodes_desc,
void* host_neighbor_nodes_ptr,
wholememory_array_description_t neighbor_nodes_desc,
void** ref_host_output_neighbor_raw_to_unique_mapping_ptr,
wholememory_array_description_t output_neighbor_raw_to_unique_mapping_desc);
} // namespace testing
} // namespace graph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/append_unique_test_utils.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "append_unique_test_utils.hpp"
#include <cstdint>
#include <gtest/gtest.h>
#include <iterator>
#include <random>
#include <unordered_map>
#include <utility>
#include <vector>
#include <wholememory/tensor_description.h>
namespace graph_ops {
namespace testing {
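// Fills `nodes` with `node_count` random ids. When `unique` is true the ids are sampled without
// replacement from [0, range) via std::sample; otherwise duplicates may occur and values are
// drawn from the inclusive range [0, range].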
template <typename DataType>
void host_get_random_node_ids(void* nodes, int64_t node_count, int64_t range, bool unique)
{
DataType* nodes_ptr = static_cast<DataType*>(nodes);
std::random_device rand_dev;
std::mt19937 gen(rand_dev());
std::uniform_int_distribution<int64_t> distr(0, range);
if (!unique) {
for (int64_t i = 0; i < node_count; i++) {
nodes_ptr[i] = distr(gen);
}
} else {
std::vector<DataType> tmp_array_in(range);
for (int64_t i = 0; i < range; i++) {
tmp_array_in[i] = i;
}
std::sample(tmp_array_in.begin(), tmp_array_in.end(), nodes_ptr, node_count, gen);
}
}
void gen_node_ids(void* host_target_nodes_ptr,
wholememory_array_description_t node_desc,
int64_t range,
bool unique)
{
int64_t node_count = node_desc.size;
if (node_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_random_node_ids<int>(host_target_nodes_ptr, node_count, range, unique);
} else if (node_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_random_node_ids<int64_t>(host_target_nodes_ptr, node_count, range, unique);
}
}
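// Host reference for append-unique: target nodes keep their original positions [0, target_count),
// and every neighbor node not already present in the table is appended after them, so the unique
// array starts with the targets followed by the newly seen neighbors in discovery order.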
template <typename DataType>
void insert_nodes_to_append_unique_hash_table(
std::unordered_map<DataType, int>& append_unique_hash_table,
DataType* node_ptr,
int64_t node_count,
int* unique_count_ptr,
bool target)
{
if (target) {
for (int64_t i = 0; i < node_count; i++) {
DataType key = node_ptr[i];
append_unique_hash_table.insert(std::make_pair(key, i));
}
return;
} else {
int unique_count = *unique_count_ptr;
for (int64_t i = 0; i < node_count; i++) {
DataType key = node_ptr[i];
if (append_unique_hash_table.find(key) == append_unique_hash_table.end()) {
append_unique_hash_table.insert(std::make_pair(key, unique_count));
unique_count++;
}
}
*unique_count_ptr = unique_count;
}
}
template <typename DataType>
void host_get_append_unique(void* target_nodes_ptr,
wholememory_array_description_t target_nodes_desc,
void* neighbor_nodes_ptr,
wholememory_array_description_t neighbor_nodes_desc,
int* host_total_unique_count,
void** host_output_unique_nodes_ptr)
{
std::unordered_map<DataType, int> append_unique_hash_table;
int unique_count = target_nodes_desc.size;
insert_nodes_to_append_unique_hash_table<DataType>(append_unique_hash_table,
static_cast<DataType*>(target_nodes_ptr),
target_nodes_desc.size,
&unique_count,
true);
insert_nodes_to_append_unique_hash_table<DataType>(append_unique_hash_table,
static_cast<DataType*>(neighbor_nodes_ptr),
neighbor_nodes_desc.size,
&unique_count,
false);
*host_output_unique_nodes_ptr = (DataType*)malloc(unique_count * sizeof(DataType));
*host_total_unique_count = unique_count;
for (auto iter = append_unique_hash_table.begin(); iter != append_unique_hash_table.end();
iter++) {
DataType key = iter->first;
int index = iter->second;
static_cast<DataType*>(*host_output_unique_nodes_ptr)[index] = key;
}
}
void host_append_unique(void* target_nodes_ptr,
wholememory_array_description_t target_nodes_desc,
void* neighbor_nodes_ptr,
wholememory_array_description_t neighbor_nodes_desc,
int* host_total_unique_count,
void** host_output_unique_nodes_ptr)
{
EXPECT_EQ(target_nodes_desc.dtype, neighbor_nodes_desc.dtype);
if (target_nodes_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_append_unique<int>(target_nodes_ptr,
target_nodes_desc,
neighbor_nodes_ptr,
neighbor_nodes_desc,
host_total_unique_count,
host_output_unique_nodes_ptr);
} else if (target_nodes_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_append_unique<int64_t>(target_nodes_ptr,
target_nodes_desc,
neighbor_nodes_ptr,
neighbor_nodes_desc,
host_total_unique_count,
host_output_unique_nodes_ptr);
}
}
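// Builds the raw-to-unique mapping: for each raw neighbor id, look up its position in the unique
// node array produced by host_append_unique and store that index in the output mapping array.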
template <typename DataType>
void host_get_append_unique_neighbor_raw_to_unique(
void* host_output_unique_nodes_ptr,
wholememory_array_description_t output_unique_nodes_desc,
void* host_neighbor_nodes_ptr,
wholememory_array_description_t neighbor_node_desc,
void** host_output_neighbor_raw_to_unique_mapping_ptr,
wholememory_array_description_t output_neighbor_raw_to_unique_mapping_desc)
{
DataType* output_unique_nodes_ptr = static_cast<DataType*>(host_output_unique_nodes_ptr);
DataType* neighbor_nodes_ptr = static_cast<DataType*>(host_neighbor_nodes_ptr);
std::unordered_map<DataType, int> unique_node_map;
for (int64_t i = 0; i < output_unique_nodes_desc.size; i++) {
DataType key = output_unique_nodes_ptr[i];
unique_node_map.insert(std::make_pair(key, i));
}
for (int64_t i = 0; i < neighbor_node_desc.size; i++) {
DataType key = neighbor_nodes_ptr[i];
static_cast<int*>(*host_output_neighbor_raw_to_unique_mapping_ptr)[i] = unique_node_map[key];
}
}
void host_gen_append_unique_neighbor_raw_to_unique(
void* host_output_unique_nodes_ptr,
wholememory_array_description_t output_unique_nodes_desc,
void* host_neighbor_nodes_ptr,
wholememory_array_description_t neighbor_nodes_desc,
void** host_output_neighbor_raw_to_unique_mapping_ptr,
wholememory_array_description_t output_neighbor_raw_to_unique_mapping_desc)
{
EXPECT_EQ(output_unique_nodes_desc.dtype, neighbor_nodes_desc.dtype);
if (*host_output_neighbor_raw_to_unique_mapping_ptr == nullptr) {
*host_output_neighbor_raw_to_unique_mapping_ptr = (void*)malloc(
wholememory_get_memory_size_from_array(&output_neighbor_raw_to_unique_mapping_desc));
}
if (output_unique_nodes_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_append_unique_neighbor_raw_to_unique<int>(
host_output_unique_nodes_ptr,
output_unique_nodes_desc,
host_neighbor_nodes_ptr,
neighbor_nodes_desc,
host_output_neighbor_raw_to_unique_mapping_ptr,
output_neighbor_raw_to_unique_mapping_desc);
} else if (output_unique_nodes_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_append_unique_neighbor_raw_to_unique<int64_t>(
host_output_unique_nodes_ptr,
output_unique_nodes_desc,
host_neighbor_nodes_ptr,
neighbor_nodes_desc,
host_output_neighbor_raw_to_unique_mapping_ptr,
output_neighbor_raw_to_unique_mapping_desc);
}
}
} // namespace testing
} // namespace graph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/csr_add_self_loop_utils.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../wholegraph_ops/graph_sampling_test_utils.hpp"
#include "csr_add_self_loop_utils.hpp"
#include <experimental/random>
#include <gtest/gtest.h>
#include <random>
#include <wholememory/graph_op.h>
#include <wholememory_ops/register.hpp>
namespace graph_ops {
namespace testing {
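// Generates a random local CSR graph: per-row degrees are initialized from a binomial
// distribution, the total is then nudged up or down at random rows until it matches
// graph_edge_num, the degrees are prefix-summed into row offsets, and each row's column ids are
// sampled without replacement from [0, col_num).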
template <typename RowPtrType, typename ColIdType>
void host_get_local_csr_graph(int row_num,
int col_num,
int graph_edge_num,
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc)
{
RowPtrType* csr_row_ptr = static_cast<RowPtrType*>(host_csr_row_ptr);
ColIdType* csr_col_ptr = static_cast<ColIdType*>(host_csr_col_ptr);
int average_edge_per_row = graph_edge_num / row_num;
std::default_random_engine generator;
std::binomial_distribution<int> distribution(average_edge_per_row, 1);
int total_edge = 0;
for (int i = 0; i < row_num; i++) {
while (true) {
int random_num = distribution(generator);
if (random_num >= 0 && random_num <= col_num) {
csr_row_ptr[i] = random_num;
total_edge += random_num;
break;
}
}
}
int adjust_edge = std::abs(total_edge - graph_edge_num);
std::random_device rand_dev;
std::mt19937 gen(rand_dev());
std::uniform_int_distribution<int> distr(0, row_num - 1);
if (total_edge > graph_edge_num) {
for (int i = 0; i < adjust_edge; i++) {
while (true) {
int random_row_id = distr(gen);
if (csr_row_ptr[random_row_id] > 0) {
csr_row_ptr[random_row_id]--;
break;
}
}
}
}
if (total_edge < graph_edge_num) {
for (int i = 0; i < adjust_edge; i++) {
while (true) {
int random_row_id = distr(gen);
if (csr_row_ptr[random_row_id] < col_num) {
csr_row_ptr[random_row_id]++;
break;
}
}
}
}
wholegraph_ops::testing::host_prefix_sum_array(host_csr_row_ptr, csr_row_ptr_desc);
EXPECT_TRUE(csr_row_ptr[row_num] == graph_edge_num);
for (int i = 0; i < row_num; i++) {
int start = csr_row_ptr[i];
int end = csr_row_ptr[i + 1];
int edge_count = end - start;
if (edge_count == 0) continue;
std::vector<int> array_in(col_num);
    for (int col_id = 0; col_id < col_num; col_id++) {
      array_in[col_id] = col_id;
    }
std::sample(array_in.begin(), array_in.end(), &csr_col_ptr[start], edge_count, gen);
}
}
REGISTER_DISPATCH_TWO_TYPES(HOSTGETLOCALCSRGRAPH, host_get_local_csr_graph, SINT3264, SINT3264)
template <typename DataType>
void get_random_float_array(void* host_csr_weight_ptr,
wholememory_array_description_t graph_csr_weight_ptr_desc)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<DataType> dis(1.0, 20.0);
for (int64_t i = 0; i < graph_csr_weight_ptr_desc.size; i++) {
static_cast<DataType*>(host_csr_weight_ptr)[i] = (DataType)dis(gen);
}
}
void gen_local_csr_graph(int row_num,
int col_num,
int graph_edge_num,
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_csr_weight_ptr,
wholememory_array_description_t csr_weight_ptr_desc)
{
DISPATCH_TWO_TYPES(csr_row_ptr_desc.dtype,
csr_col_ptr_desc.dtype,
HOSTGETLOCALCSRGRAPH,
row_num,
col_num,
graph_edge_num,
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc);
if (host_csr_weight_ptr != nullptr) {
if (csr_weight_ptr_desc.dtype == WHOLEMEMORY_DT_FLOAT) {
get_random_float_array<float>(host_csr_weight_ptr, csr_weight_ptr_desc);
} else if (csr_weight_ptr_desc.dtype == WHOLEMEMORY_DT_DOUBLE) {
get_random_float_array<double>(host_csr_weight_ptr, csr_weight_ptr_desc);
}
}
}
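// Host reference for csr_add_self_loop: every row i gains one extra edge (i, i) placed in front of
// its original neighbors, so the output row offset becomes csr_row_ptr[i] + i and the column array
// grows by one entry per row.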
void host_get_csr_add_self_loop(int* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_array_desc,
int* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_array_desc,
int* host_ref_output_csr_row_ptr,
wholememory_array_description_t output_csr_row_ptr_array_desc,
int* host_ref_output_csr_col_ptr,
wholememory_array_description_t output_csr_col_ptr_array_desc)
{
for (int64_t row_id = 0; row_id < csr_row_ptr_array_desc.size - 1; row_id++) {
int start = host_csr_row_ptr[row_id];
int end = host_csr_row_ptr[row_id + 1];
host_ref_output_csr_row_ptr[row_id] = start + row_id;
host_ref_output_csr_col_ptr[start + row_id] = row_id;
for (int64_t j = start; j < end; j++) {
host_ref_output_csr_col_ptr[j + row_id + 1] = host_csr_col_ptr[j];
}
}
host_ref_output_csr_row_ptr[csr_row_ptr_array_desc.size - 1] =
host_csr_row_ptr[csr_row_ptr_array_desc.size - 1] + csr_row_ptr_array_desc.size - 1;
}
void host_csr_add_self_loop(void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_array_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_array_desc,
void* host_ref_output_csr_row_ptr,
wholememory_array_description_t output_csr_row_ptr_array_desc,
void* host_ref_output_csr_col_ptr,
wholememory_array_description_t output_csr_col_ptr_array_desc)
{
EXPECT_EQ(csr_row_ptr_array_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(csr_col_ptr_array_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_csr_row_ptr_array_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_csr_col_ptr_array_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(csr_row_ptr_array_desc.size, output_csr_row_ptr_array_desc.size);
EXPECT_EQ(csr_col_ptr_array_desc.size + csr_row_ptr_array_desc.size - 1,
output_csr_col_ptr_array_desc.size);
host_get_csr_add_self_loop(static_cast<int*>(host_csr_row_ptr),
csr_row_ptr_array_desc,
static_cast<int*>(host_csr_col_ptr),
csr_col_ptr_array_desc,
static_cast<int*>(host_ref_output_csr_row_ptr),
output_csr_row_ptr_array_desc,
static_cast<int*>(host_ref_output_csr_col_ptr),
output_csr_col_ptr_array_desc);
}
} // namespace testing
} // namespace graph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/graph_ops/csr_add_self_loop_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
namespace graph_ops {
namespace testing {
void gen_local_csr_graph(
int row_num,
int col_num,
int graph_edge_num,
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_csr_weight_ptr = nullptr,
wholememory_array_description_t csr_weight_ptr_desc = wholememory_array_description_t{});
void host_csr_add_self_loop(void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_array_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_array_desc,
void* host_ref_output_csr_row_ptr,
wholememory_array_description_t output_csr_row_ptr_array_desc,
void* host_ref_output_csr_col_ptr,
wholememory_array_description_t output_csr_col_ptr_array_desc);
} // namespace testing
} // namespace graph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholegraph_ops/graph_sampling_test_utils.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "graph_sampling_test_utils.hpp"
#include <algorithm>
#include <experimental/random>
#include <gtest/gtest.h>
#include <iterator>
#include <queue>
#include <random>
#include <vector>
#include <raft/random/rng_device.cuh>
#include <raft/random/rng_state.hpp>
#include <wholememory_ops/register.hpp>
namespace wholegraph_ops {
namespace testing {
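// Generates a random CSR graph on the host: row degrees are initialized from a binomial
// distribution, adjusted so they sum exactly to graph_edge_count, prefix-summed into int64 row
// offsets, and each row's neighbors are sampled without replacement from [0, graph_node_count).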
template <typename DataType>
void host_get_csr_graph(int64_t graph_node_count,
int64_t graph_edge_count,
void* host_csr_row_ptr,
wholememory_array_description_t graph_csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t graph_csr_col_ptr_desc)
{
int64_t* csr_row_ptr = static_cast<int64_t*>(host_csr_row_ptr);
DataType* csr_col_ptr = static_cast<DataType*>(host_csr_col_ptr);
int64_t average_edge_per_node = graph_edge_count / graph_node_count;
std::default_random_engine generator;
std::binomial_distribution<int64_t> distribution(average_edge_per_node, 1);
int total_edge = 0;
for (int64_t i = 0; i < graph_node_count; i++) {
while (true) {
int64_t random_num = distribution(generator);
if (random_num >= 0 && random_num <= graph_node_count) {
csr_row_ptr[i] = random_num;
total_edge += random_num;
break;
}
}
}
int64_t adjust_edge = std::abs(total_edge - graph_edge_count);
std::random_device rand_dev;
std::mt19937 gen(rand_dev());
std::uniform_int_distribution<int64_t> distr(0, graph_node_count - 1);
if (total_edge > graph_edge_count) {
for (int64_t i = 0; i < adjust_edge; i++) {
while (true) {
int64_t random_row_id = distr(gen);
if (csr_row_ptr[random_row_id] > 0) {
csr_row_ptr[random_row_id]--;
break;
}
}
}
}
if (total_edge < graph_edge_count) {
for (int64_t i = 0; i < adjust_edge; i++) {
while (true) {
int64_t random_row_id = distr(gen);
if (csr_row_ptr[random_row_id] < graph_node_count) {
csr_row_ptr[random_row_id]++;
break;
}
}
}
}
host_prefix_sum_array(host_csr_row_ptr, graph_csr_row_ptr_desc);
EXPECT_TRUE(csr_row_ptr[graph_node_count] == graph_edge_count);
for (int64_t i = 0; i < graph_node_count; i++) {
int64_t start = csr_row_ptr[i];
int64_t end = csr_row_ptr[i + 1];
int64_t edge_count = end - start;
if (edge_count == 0) continue;
std::vector<int64_t> array_out(edge_count);
std::vector<int64_t> array_in(graph_node_count);
    for (int64_t node_id = 0; node_id < graph_node_count; node_id++) {
      array_in[node_id] = node_id;
    }
std::sample(array_in.begin(), array_in.end(), array_out.begin(), edge_count, gen);
for (int j = 0; j < edge_count; j++) {
csr_col_ptr[start + j] = (DataType)array_out[j];
}
}
}
template <typename DataType>
void host_get_csr_weight_graph(void* host_csr_weight_ptr,
wholememory_array_description_t graph_csr_weight_ptr_desc)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<DataType> dis(1.0, 20.0);
for (int64_t i = 0; i < graph_csr_weight_ptr_desc.size; i++) {
static_cast<DataType*>(host_csr_weight_ptr)[i] = (DataType)dis(gen);
}
}
void gen_csr_graph(int64_t graph_node_count,
int64_t graph_edge_count,
void* host_csr_row_ptr,
wholememory_array_description_t graph_csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t graph_csr_col_ptr_desc,
void* host_csr_weight_ptr,
wholememory_array_description_t graph_csr_weight_ptr_desc)
{
EXPECT_TRUE(graph_csr_row_ptr_desc.dtype == WHOLEMEMORY_DT_INT64);
if (graph_csr_col_ptr_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_csr_graph<int64_t>(graph_node_count,
graph_edge_count,
host_csr_row_ptr,
graph_csr_row_ptr_desc,
host_csr_col_ptr,
graph_csr_col_ptr_desc);
} else if (graph_csr_col_ptr_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_csr_graph<int>(graph_node_count,
graph_edge_count,
host_csr_row_ptr,
graph_csr_row_ptr_desc,
host_csr_col_ptr,
graph_csr_col_ptr_desc);
}
if (host_csr_weight_ptr != nullptr) {
if (graph_csr_weight_ptr_desc.dtype == WHOLEMEMORY_DT_FLOAT) {
host_get_csr_weight_graph<float>(host_csr_weight_ptr, graph_csr_weight_ptr_desc);
} else if (graph_csr_weight_ptr_desc.dtype == WHOLEMEMORY_DT_DOUBLE) {
host_get_csr_weight_graph<double>(host_csr_weight_ptr, graph_csr_weight_ptr_desc);
}
}
}
template <typename DataType>
void host_get_random_array(void* array,
wholememory_array_description_t array_desc,
int64_t low,
int64_t high)
{
DataType* array_ptr = static_cast<DataType*>(array);
std::experimental::reseed();
for (int64_t i = 0; i < array_desc.size; i++) {
DataType random_num = std::experimental::randint<DataType>(low, high);
array_ptr[i + array_desc.storage_offset] = random_num;
}
}
void host_random_init_array(void* array,
wholememory_array_description_t array_desc,
int64_t low,
int64_t high)
{
EXPECT_TRUE(array_desc.dtype == WHOLEMEMORY_DT_INT || array_desc.dtype == WHOLEMEMORY_DT_INT64);
if (array_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_random_array<int>(array, array_desc, low, high);
} else {
host_get_random_array<int64_t>(array, array_desc, low, high);
}
}
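// In-place exclusive prefix sum, e.g. [2, 3, 1, 0] becomes [0, 2, 5, 6]; used to turn per-row
// degree counts into CSR row offsets.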
template <typename DataType>
void host_get_prefix_sum_array(void* array, wholememory_array_description_t array_desc)
{
DataType* array_ptr = static_cast<DataType*>(array);
if (array_desc.size <= 0) return;
DataType old_value = array_ptr[0];
array_ptr[0] = 0;
for (int64_t i = 1; i < array_desc.size; i++) {
DataType tmp = array_ptr[i];
array_ptr[i] = array_ptr[i - 1] + old_value;
old_value = tmp;
}
}
void host_prefix_sum_array(void* array, wholememory_array_description_t array_desc)
{
EXPECT_TRUE(array_desc.dtype == WHOLEMEMORY_DT_INT || array_desc.dtype == WHOLEMEMORY_DT_INT64);
if (array_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_prefix_sum_array<int>(array, array_desc);
} else {
host_get_prefix_sum_array<int64_t>(array, array_desc);
}
}
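// Copies the rank-local slice of a host array into the local portion of a WholeMemory handle and
// then synchronizes all ranks, so every rank contributes its own partition of the shared array.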
void copy_host_array_to_wholememory(void* host_array,
wholememory_handle_t array_handle,
wholememory_array_description_t array_desc,
cudaStream_t stream)
{
void* local_array_ptr;
size_t local_array_size, local_array_offset;
EXPECT_EQ(wholememory_get_local_memory(
&local_array_ptr, &local_array_size, &local_array_offset, array_handle),
WHOLEMEMORY_SUCCESS);
int64_t array_ele_size = wholememory_dtype_get_element_size(array_desc.dtype);
EXPECT_EQ(local_array_size % array_ele_size, 0);
EXPECT_EQ(local_array_offset % array_ele_size, 0);
wholememory_comm_t wm_comm;
EXPECT_EQ(wholememory_get_communicator(&wm_comm, array_handle), WHOLEMEMORY_SUCCESS);
if (local_array_size) {
EXPECT_EQ(cudaMemcpyAsync(local_array_ptr,
static_cast<char*>(host_array) + local_array_offset,
local_array_size,
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
}
wholememory_communicator_barrier(wm_comm);
}
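// Computes, for each center node, the number of neighbors that will be sampled (capped at
// max_sample_count when it is positive); a later exclusive prefix sum turns these counts into
// output offsets.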
template <typename DataType>
void host_get_sample_offset(void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void* host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
int64_t* csr_row_ptr = static_cast<int64_t*>(host_csr_row_ptr);
DataType* center_nodes_ptr = static_cast<DataType*>(host_center_nodes);
int* output_sample_offset_ptr = static_cast<int*>(host_ref_output_sample_offset);
for (int64_t i = 0; i < center_node_desc.size; i++) {
DataType center_node_id = center_nodes_ptr[i];
int neighbor_node_count = csr_row_ptr[center_node_id + 1] - csr_row_ptr[center_node_id];
if (max_sample_count > 0) {
neighbor_node_count = std::min(neighbor_node_count, max_sample_count);
}
output_sample_offset_ptr[i] = neighbor_node_count;
}
}
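// Copies every neighbor of each center node without sampling; used as the reference when
// max_sample_count <= 0, i.e. when the operator is asked to return all neighbors.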
template <typename IdType, typename ColIdType>
void host_sample_all(void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void* host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_ref_output_dest_nodes,
void* host_ref_output_center_nodes_local_id,
void* host_ref_output_global_edge_id)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
int64_t* csr_row_ptr = static_cast<int64_t*>(host_csr_row_ptr);
ColIdType* csr_col_ptr = static_cast<ColIdType*>(host_csr_col_ptr);
IdType* center_nodes_ptr = static_cast<IdType*>(host_center_nodes);
int* output_sample_offset_ptr = static_cast<int*>(host_ref_output_sample_offset);
ColIdType* output_dest_nodes_ptr = static_cast<ColIdType*>(host_ref_output_dest_nodes);
int* output_center_nodes_local_id_ptr = static_cast<int*>(host_ref_output_center_nodes_local_id);
int64_t* output_global_edge_id_ptr = static_cast<int64_t*>(host_ref_output_global_edge_id);
int64_t center_nodes_count = center_node_desc.size;
for (int64_t i = 0; i < center_nodes_count; i++) {
int output_id = output_sample_offset_ptr[i];
int output_local_id = 0;
IdType center_node_id = center_nodes_ptr[i];
for (int64_t j = csr_row_ptr[center_node_id]; j < csr_row_ptr[center_node_id + 1]; j++) {
output_dest_nodes_ptr[output_id + output_local_id] = csr_col_ptr[j];
output_center_nodes_local_id_ptr[output_id + output_local_id] = (int)i;
output_global_edge_id_ptr[output_id + output_local_id] = j;
output_local_id++;
}
}
}
REGISTER_DISPATCH_TWO_TYPES(HOSTSAMPLEALL, host_sample_all, SINT3264, SINT3264)
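// Selects M out of N items without replacement using the pre-generated per-position random
// numbers in `r` (a partial Fisher-Yates selection over the index array Q), so the host reference
// reproduces the index choices made by the unweighted sampling kernel.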
template <int Offset = 0>
void random_sample_without_replacement_cpu_base(std::vector<int>* a,
const std::vector<int32_t>& r,
int M,
int N)
{
a->resize(M + Offset);
std::vector<int> Q(N + Offset);
for (int i = Offset; i < N + Offset; ++i) {
Q[i] = i;
}
for (int i = Offset; i < M + Offset; ++i) {
a->at(i) = Q[r[i]];
Q[r[i]] = Q[N - i + 2 * Offset - 1];
}
}
template <typename IdType, typename ColIdType>
void host_unweighted_sample_without_replacement(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void* host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_ref_output_dest_nodes,
void* host_ref_output_center_nodes_local_id,
void* host_ref_output_global_edge_id,
unsigned long long random_seed)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
int64_t* csr_row_ptr = static_cast<int64_t*>(host_csr_row_ptr);
ColIdType* csr_col_ptr = static_cast<ColIdType*>(host_csr_col_ptr);
IdType* center_nodes_ptr = static_cast<IdType*>(host_center_nodes);
int* output_sample_offset_ptr = static_cast<int*>(host_ref_output_sample_offset);
ColIdType* output_dest_nodes_ptr = static_cast<ColIdType*>(host_ref_output_dest_nodes);
int* output_center_nodes_local_id_ptr = static_cast<int*>(host_ref_output_center_nodes_local_id);
int64_t* output_global_edge_id_ptr = static_cast<int64_t*>(host_ref_output_global_edge_id);
int64_t center_nodes_count = center_node_desc.size;
int M = max_sample_count;
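  // These tables mirror the launch configuration of the unweighted sampling CUDA kernel (warps per
  // block and items per thread as a function of max_sample_count); replaying the same per-thread
  // PCG streams below lets this host reference reproduce the kernel's sampled indices.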
static const int warp_count_array[32] = {1, 1, 1, 2, 2, 2, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8};
static const int items_per_thread_array[32] = {1, 2, 3, 2, 3, 3, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
int func_idx = (max_sample_count - 1) / 32;
int device_num_threads = warp_count_array[func_idx] * 32;
int items_per_thread = items_per_thread_array[func_idx];
for (int64_t i = 0; i < center_nodes_count; i++) {
int output_id = output_sample_offset_ptr[i];
int output_local_id = 0;
IdType center_node_id = center_nodes_ptr[i];
int64_t start = csr_row_ptr[center_node_id];
int64_t end = csr_row_ptr[center_node_id + 1];
int64_t neighbor_count = end - start;
int N = neighbor_count;
int blockidx = i;
int gidx = blockidx * device_num_threads;
if (neighbor_count <= 0) continue;
if (neighbor_count <= max_sample_count) {
for (int64_t j = start; j < end; j++) {
output_dest_nodes_ptr[output_id + output_local_id] = csr_col_ptr[j];
output_center_nodes_local_id_ptr[output_id + output_local_id] = (int)i;
output_global_edge_id_ptr[output_id + output_local_id] = j;
output_local_id++;
}
} else {
std::vector<int32_t> r(neighbor_count);
for (int j = 0; j < device_num_threads; j++) {
int local_gidx = gidx + j;
raft::random::RngState _rngstate(random_seed, 0, raft::random::GeneratorType::GenPC);
raft::random::detail::DeviceState<raft::random::detail::PCGenerator> rngstate(_rngstate);
raft::random::detail::PCGenerator rng(rngstate, (uint64_t)local_gidx);
raft::random::detail::UniformDistParams<int32_t> params;
params.start = 0;
params.end = 1;
for (int k = 0; k < items_per_thread; k++) {
int id = k * device_num_threads + j;
int32_t random_num;
raft::random::detail::custom_next(rng, &random_num, params, 0, 0);
if (id < neighbor_count) { r[id] = id < M ? (random_num % (N - id)) : N; }
}
}
std::vector<int> random_sample_id(max_sample_count, 0);
random_sample_without_replacement_cpu_base(&random_sample_id, r, M, N);
for (int sample_id = 0; sample_id < M; sample_id++) {
output_dest_nodes_ptr[output_id + sample_id] =
csr_col_ptr[start + random_sample_id[sample_id]];
output_center_nodes_local_id_ptr[output_id + sample_id] = i;
output_global_edge_id_ptr[output_id + sample_id] = start + random_sample_id[sample_id];
}
}
}
}
REGISTER_DISPATCH_TWO_TYPES(HOSTUNWEIGHTEDSAMPLEWITHOUTREPLACEMENT,
host_unweighted_sample_without_replacement,
SINT3264,
SINT3264)
void wholegraph_csr_unweighted_sample_without_replacement_cpu(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void** host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void** host_ref_output_dest_nodes,
void** host_ref_output_center_nodes_local_id,
void** host_ref_output_global_edge_id,
int* output_sample_dest_nodes_count,
unsigned long long random_seed)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_sample_offset_desc.size, center_node_desc.size + 1);
*host_ref_output_sample_offset =
(void*)malloc(wholememory_get_memory_size_from_array(&output_sample_offset_desc));
if (center_node_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_sample_offset<int64_t>(host_csr_row_ptr,
csr_row_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc);
} else if (center_node_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_sample_offset<int>(host_csr_row_ptr,
csr_row_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc);
}
host_prefix_sum_array(*host_ref_output_sample_offset, output_sample_offset_desc);
*output_sample_dest_nodes_count =
static_cast<int*>(*host_ref_output_sample_offset)[center_node_desc.size];
*host_ref_output_dest_nodes = malloc((*output_sample_dest_nodes_count) *
wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype));
*host_ref_output_center_nodes_local_id = malloc((*output_sample_dest_nodes_count) * sizeof(int));
*host_ref_output_global_edge_id = malloc((*output_sample_dest_nodes_count) * sizeof(int64_t));
if (max_sample_count <= 0) {
DISPATCH_TWO_TYPES(center_node_desc.dtype,
csr_col_ptr_desc.dtype,
HOSTSAMPLEALL,
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc,
*host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id,
*host_ref_output_global_edge_id);
return;
}
if (max_sample_count > 1024) { return; }
DISPATCH_TWO_TYPES(center_node_desc.dtype,
csr_col_ptr_desc.dtype,
HOSTUNWEIGHTEDSAMPLEWITHOUTREPLACEMENT,
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc,
*host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id,
*host_ref_output_global_edge_id,
random_seed);
}
template <typename DataType>
void check_value_same(void* value, void* ref, int64_t size)
{
int64_t diff_count = 0;
DataType* value_ptr = static_cast<DataType*>(value);
DataType* ref_ptr = static_cast<DataType*>(ref);
for (int i = 0; i < size; i++) {
if (value_ptr[i] != ref_ptr[i]) {
if (diff_count < 10 * 1000 * 1000) {
printf("i=%d, value = %ld, ref = %ld\n",
i,
static_cast<int64_t>(value_ptr[i]),
static_cast<int64_t>(ref_ptr[i]));
EXPECT_EQ(value_ptr[i], ref_ptr[i]);
}
diff_count++;
}
}
}
REGISTER_DISPATCH_ONE_TYPE(CHECKVALUESAME, check_value_same, SINT3264)
void host_check_two_array_same(void* host_array,
wholememory_array_description_t host_array_desc,
void* host_ref,
wholememory_array_description_t host_ref_desc)
{
EXPECT_EQ(host_array_desc.dtype, host_ref_desc.dtype);
EXPECT_EQ(host_array_desc.size, host_ref_desc.size);
DISPATCH_ONE_TYPE(
host_array_desc.dtype, CHECKVALUESAME, host_array, host_ref, host_array_desc.size);
}
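// Despite its name, this returns the number of leading zero bits of a 64-bit value (64 minus the
// bit length of num); host_gen_key_from_weight uses it to scale the exponent of the random key.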
inline int count_one(unsigned long long num)
{
int c = 0;
while (num) {
num >>= 1;
c++;
}
return 64 - c;
}
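// Converts an edge weight into a random sampling key: a uniform variate is built from raw PCG
// output and its logarithm is scaled by 1/weight, so edges with larger weights tend to receive
// larger keys. This mirrors the key construction of the weighted sampling CUDA kernel so that
// host and device results agree.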
template <typename WeightType>
float host_gen_key_from_weight(const WeightType weight, raft::random::detail::PCGenerator& rng)
{
float u = 0.0;
rng.next(u);
u = -(0.5 + 0.5 * u);
uint64_t random_num2 = 0;
int seed_count = -1;
do {
rng.next(random_num2);
seed_count++;
} while (!random_num2);
int one_bit = count_one(random_num2) + seed_count * 64;
u *= pow(2, -one_bit);
// float logk = (log1pf(u) / logf(2.0)) * (1.0f / (float)weight);
float logk = (1 / weight) * (log1p(u) / log(2.0));
// u = random_uniform(0,1), logk = 1/weight *logf(u)
return logk;
}
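// Host reference for weighted sampling without replacement: every neighbor gets a weight-derived
// random key (see host_gen_key_from_weight) and a min-heap keeps the max_sample_count edges with
// the largest keys; rows with at most max_sample_count neighbors are copied through directly.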
template <typename IdType, typename ColIdType, typename WeightType>
void host_weighted_sample_without_replacement(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_csr_weight_ptr,
wholememory_array_description_t csr_weight_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void* host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_ref_output_dest_nodes,
void* host_ref_output_center_nodes_local_id,
void* host_ref_output_global_edge_id,
unsigned long long random_seed)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
int64_t* csr_row_ptr = static_cast<int64_t*>(host_csr_row_ptr);
ColIdType* csr_col_ptr = static_cast<ColIdType*>(host_csr_col_ptr);
WeightType* csr_weight_ptr = static_cast<WeightType*>(host_csr_weight_ptr);
IdType* center_nodes_ptr = static_cast<IdType*>(host_center_nodes);
int* output_sample_offset_ptr = static_cast<int*>(host_ref_output_sample_offset);
ColIdType* output_dest_nodes_ptr = static_cast<ColIdType*>(host_ref_output_dest_nodes);
int* output_center_nodes_local_id_ptr = static_cast<int*>(host_ref_output_center_nodes_local_id);
int64_t* output_global_edge_id_ptr = static_cast<int64_t*>(host_ref_output_global_edge_id);
int64_t center_nodes_count = center_node_desc.size;
int block_size = 128;
if (max_sample_count > 256) { block_size = 256; }
for (int64_t i = 0; i < center_nodes_count; i++) {
int output_id = output_sample_offset_ptr[i];
int output_local_id = 0;
IdType center_node_id = center_nodes_ptr[i];
int64_t start = csr_row_ptr[center_node_id];
int64_t end = csr_row_ptr[center_node_id + 1];
int64_t neighbor_count = end - start;
int blockidx = i;
int gidx = blockidx * block_size;
if (neighbor_count <= 0) continue;
if (neighbor_count <= max_sample_count) {
for (int64_t j = start; j < end; j++) {
output_dest_nodes_ptr[output_id + output_local_id] = csr_col_ptr[j];
output_center_nodes_local_id_ptr[output_id + output_local_id] = (int)i;
output_global_edge_id_ptr[output_id + output_local_id] = j;
output_local_id++;
}
} else {
int process_count = 0;
struct cmp {
bool operator()(std::pair<int, WeightType> left, std::pair<int, WeightType> right)
{
return (left.second) > (right.second);
}
};
std::priority_queue<std::pair<int, WeightType>, std::vector<std::pair<int, WeightType>>, cmp>
small_heap;
auto consume_fun = [&](int id, raft::random::detail::PCGenerator& rng) {
WeightType edge_weight = csr_weight_ptr[start + id];
WeightType weight = host_gen_key_from_weight(edge_weight, rng);
process_count++;
if (process_count <= max_sample_count) {
small_heap.push(std::make_pair(id, weight));
} else {
std::pair<int, WeightType> small_heap_top_ele = small_heap.top();
if (small_heap_top_ele.second < weight) {
small_heap.pop();
small_heap.push(std::make_pair(id, weight));
}
}
};
for (int j = 0; j < block_size; j++) {
int local_gidx = gidx + j;
raft::random::RngState _rngstate(random_seed, 0, raft::random::GeneratorType::GenPC);
raft::random::detail::DeviceState<raft::random::detail::PCGenerator> rngstate(_rngstate);
raft::random::detail::PCGenerator rng(rngstate, (uint64_t)local_gidx);
for (int id = j; id < neighbor_count; id += block_size) {
if (id < neighbor_count) { consume_fun(id, rng); }
}
}
for (int sample_id = 0; sample_id < max_sample_count; sample_id++) {
output_dest_nodes_ptr[output_id + sample_id] = csr_col_ptr[start + small_heap.top().first];
output_center_nodes_local_id_ptr[output_id + sample_id] = i;
output_global_edge_id_ptr[output_id + sample_id] = start + small_heap.top().first;
small_heap.pop();
}
}
}
}
REGISTER_DISPATCH_THREE_TYPES(HOSTWEIGHTEDSAMPLEWITHOUTREPLACEMENT,
host_weighted_sample_without_replacement,
SINT3264,
SINT3264,
FLOAT_DOUBLE)
void wholegraph_csr_weighted_sample_without_replacement_cpu(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_csr_weight_ptr,
wholememory_array_description_t csr_weight_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void** host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void** host_ref_output_dest_nodes,
void** host_ref_output_center_nodes_local_id,
void** host_ref_output_global_edge_id,
int* output_sample_dest_nodes_count,
unsigned long long random_seed)
{
EXPECT_EQ(csr_row_ptr_desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_sample_offset_desc.size, center_node_desc.size + 1);
*host_ref_output_sample_offset =
(void*)malloc(wholememory_get_memory_size_from_array(&output_sample_offset_desc));
if (center_node_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_sample_offset<int64_t>(host_csr_row_ptr,
csr_row_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc);
} else if (center_node_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_sample_offset<int>(host_csr_row_ptr,
csr_row_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc);
}
host_prefix_sum_array(*host_ref_output_sample_offset, output_sample_offset_desc);
*output_sample_dest_nodes_count =
static_cast<int*>(*host_ref_output_sample_offset)[center_node_desc.size];
*host_ref_output_dest_nodes = malloc((*output_sample_dest_nodes_count) *
wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype));
*host_ref_output_center_nodes_local_id = malloc((*output_sample_dest_nodes_count) * sizeof(int));
*host_ref_output_global_edge_id = malloc((*output_sample_dest_nodes_count) * sizeof(int64_t));
if (max_sample_count <= 0) {
DISPATCH_TWO_TYPES(center_node_desc.dtype,
csr_col_ptr_desc.dtype,
HOSTSAMPLEALL,
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc,
*host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id,
*host_ref_output_global_edge_id);
return;
}
if (max_sample_count > 1024) { return; }
DISPATCH_THREE_TYPES(center_node_desc.dtype,
csr_col_ptr_desc.dtype,
csr_weight_ptr_desc.dtype,
HOSTWEIGHTEDSAMPLEWITHOUTREPLACEMENT,
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_csr_weight_ptr,
csr_weight_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
*host_ref_output_sample_offset,
output_sample_offset_desc,
*host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id,
*host_ref_output_global_edge_id,
random_seed);
}
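// Sorts each sampled segment (neighbors and global edge ids per center node) so that GPU and CPU
// outputs can be compared element-wise without depending on the order produced by the kernels.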
template <typename DataType>
void host_get_segment_sort(void* host_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_output_dest_nodes,
wholememory_array_description_t output_dest_nodes_desc,
void* host_output_global_edge_id,
wholememory_array_description_t output_global_edge_id_desc)
{
int* output_sample_offset_ptr = static_cast<int*>(host_output_sample_offset);
DataType* output_dest_nodes_ptr = static_cast<DataType*>(host_output_dest_nodes);
int64_t* output_global_edge_id_ptr = static_cast<int64_t*>(host_output_global_edge_id);
for (int64_t i = 0; i < output_sample_offset_desc.size - 1; i++) {
int start = output_sample_offset_ptr[i];
int end = output_sample_offset_ptr[i + 1];
std::sort(output_dest_nodes_ptr + start, output_dest_nodes_ptr + end);
std::sort(output_global_edge_id_ptr + start, output_global_edge_id_ptr + end);
}
}
void segment_sort_output(void* host_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_output_dest_nodes,
wholememory_array_description_t output_dest_nodes_desc,
void* host_output_global_edge_id,
wholememory_array_description_t output_global_edge_id_desc)
{
EXPECT_EQ(output_sample_offset_desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_global_edge_id_desc.dtype, WHOLEMEMORY_DT_INT64);
if (output_dest_nodes_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_segment_sort<int>(host_output_sample_offset,
output_sample_offset_desc,
host_output_dest_nodes,
output_dest_nodes_desc,
host_output_global_edge_id,
output_global_edge_id_desc);
} else if (output_dest_nodes_desc.dtype == WHOLEMEMORY_DT_INT64) {
host_get_segment_sort<int64_t>(host_output_sample_offset,
output_sample_offset_desc,
host_output_dest_nodes,
output_dest_nodes_desc,
host_output_global_edge_id,
output_global_edge_id_desc);
}
}
} // namespace testing
} // namespace wholegraph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholegraph_ops/graph_sampling_test_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholegraph_ops {
namespace testing {
void host_random_init_array(void* array,
wholememory_array_description_t array_desc,
int64_t low,
int64_t high);
void host_prefix_sum_array(void* array, wholememory_array_description_t array_desc);
void copy_host_array_to_wholememory(void* host_array,
wholememory_handle_t array_handle,
wholememory_array_description_t array_desc,
cudaStream_t stream);
void wholegraph_csr_unweighted_sample_without_replacement_cpu(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void** host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void** host_ref_output_dest_nodes,
void** host_ref_output_center_nodes_local_id,
void** host_ref_output_global_edge_id,
int* output_sample_dest_nodes_count,
unsigned long long random_seed);
void wholegraph_csr_weighted_sample_without_replacement_cpu(
void* host_csr_row_ptr,
wholememory_array_description_t csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t csr_col_ptr_desc,
void* host_csr_weight_ptr,
wholememory_array_description_t csr_weight_ptr_desc,
void* host_center_nodes,
wholememory_array_description_t center_node_desc,
int max_sample_count,
void** host_ref_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void** host_ref_output_dest_nodes,
void** host_ref_output_center_nodes_local_id,
void** host_ref_output_global_edge_id,
int* output_sample_dest_nodes_count,
unsigned long long random_seed);
void gen_csr_graph(
int64_t graph_node_count,
int64_t graph_edge_count,
void* host_csr_row_ptr,
wholememory_array_description_t graph_csr_row_ptr_desc,
void* host_csr_col_ptr,
wholememory_array_description_t graph_csr_col_ptr_desc,
void* host_csr_weight_ptr = nullptr,
wholememory_array_description_t graph_csr_weight_ptr_desc = wholememory_array_description_t{});
void host_check_two_array_same(void* host_array,
wholememory_array_description_t host_array_desc,
void* host_ref,
wholememory_array_description_t host_ref_desc);
void segment_sort_output(void* host_output_sample_offset,
wholememory_array_description_t output_sample_offset_desc,
void* host_output_dest_nodes,
wholememory_array_description_t output_dest_nodes_desc,
void* host_output_global_edge_id,
wholememory_array_description_t output_global_edge_id_desc);
} // namespace testing
} // namespace wholegraph_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholegraph_ops/wholegraph_csr_weighted_sample_without_replacement_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <random>
#include <wholememory/tensor_description.h>
#include <wholememory/wholegraph_op.h>
#include <wholememory/wholememory.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "wholememory/initialize.hpp"
#include "../wholememory/wholememory_test_utils.hpp"
#include "graph_sampling_test_utils.hpp"
typedef struct WholeGraphCSRWeightedSampleWithoutReplacementTestParam {
wholememory_array_description_t get_csr_row_ptr_desc() const
{
return wholememory_create_array_desc(graph_node_count + 1, 0, csr_row_ptr_dtype);
}
wholememory_array_description_t get_csr_col_ptr_desc() const
{
return wholememory_create_array_desc(graph_edge_count, 0, csr_col_ptr_dtype);
}
wholememory_array_description_t get_csr_weight_ptr_desc() const
{
return wholememory_create_array_desc(graph_edge_count, 0, csr_weight_ptr_dtype);
}
wholememory_array_description_t get_center_node_desc() const
{
return wholememory_create_array_desc(center_node_count, 0, center_node_dtype);
}
wholememory_array_description_t get_output_sample_offset_desc() const
{
return wholememory_create_array_desc(center_node_count + 1, 0, output_sample_offset_dtype);
}
int64_t get_graph_node_count() const { return graph_node_count; }
int64_t get_graph_edge_count() const { return graph_edge_count; }
int64_t get_max_sample_count() const { return max_sample_count; }
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_memory_type(
wholememory_memory_type_t new_memory_type)
{
memory_type = new_memory_type;
return *this;
};
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_memory_location(
wholememory_memory_location_t new_memory_location)
{
memory_location = new_memory_location;
return *this;
};
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_max_sample_count(int new_sample_count)
{
max_sample_count = new_sample_count;
return *this;
}
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_center_node_count(
int new_center_node_count)
{
center_node_count = new_center_node_count;
return *this;
}
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_graph_node_count(
int new_graph_node_count)
{
graph_node_count = new_graph_node_count;
return *this;
}
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_graph_edge_couont(
int new_graph_edge_count)
{
graph_edge_count = new_graph_edge_count;
return *this;
}
WholeGraphCSRWeightedSampleWithoutReplacementTestParam& set_center_node_type(
wholememory_dtype_t new_center_node_dtype)
{
center_node_dtype = new_center_node_dtype;
return *this;
}
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_DEVICE;
int64_t max_sample_count = 10;
int64_t center_node_count = 512;
int64_t graph_node_count = 9703LL;
int64_t graph_edge_count = 104323L;
wholememory_dtype_t csr_row_ptr_dtype = WHOLEMEMORY_DT_INT64;
wholememory_dtype_t csr_col_ptr_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t csr_weight_ptr_dtype = WHOLEMEMORY_DT_FLOAT;
wholememory_dtype_t center_node_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_sample_offset_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_dest_node_dtype = center_node_dtype;
wholememory_dtype_t output_center_node_local_id_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_globla_edge_id_dtype = WHOLEMEMORY_DT_INT64;
} WholeGraphCSRWeightedSampleWithoutReplacementTestParam;
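// The parameter struct above configures graph size, dtypes, sampling fan-out, and WholeMemory
// type/location for the multi-process weighted sampling test below.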
class WholeGraphCSRWeightedSampleWithoutReplacementParameterTests
: public ::testing::TestWithParam<WholeGraphCSRWeightedSampleWithoutReplacementTestParam> {};
TEST_P(WholeGraphCSRWeightedSampleWithoutReplacementParameterTests, WeightedSampleTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
auto graph_node_count = params.get_graph_node_count();
auto graph_edge_count = params.get_graph_edge_count();
auto graph_csr_row_ptr_desc = params.get_csr_row_ptr_desc();
auto graph_csr_col_ptr_desc = params.get_csr_col_ptr_desc();
auto graph_csr_weight_ptr_desc = params.get_csr_weight_ptr_desc();
void* host_csr_row_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&graph_csr_row_ptr_desc));
void* host_csr_col_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&graph_csr_col_ptr_desc));
void* host_csr_weight_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&graph_csr_weight_ptr_desc));
wholegraph_ops::testing::gen_csr_graph(graph_node_count,
graph_edge_count,
host_csr_row_ptr,
graph_csr_row_ptr_desc,
host_csr_col_ptr,
graph_csr_col_ptr_desc,
host_csr_weight_ptr,
graph_csr_weight_ptr_desc);
MultiProcessRun(
dev_count,
[¶ms, &pipes, host_csr_row_ptr, host_csr_col_ptr, host_csr_weight_ptr](int world_rank,
int world_size) {
thread_local std::random_device rd;
thread_local std::mt19937 gen(rd());
thread_local std::uniform_int_distribution<unsigned long long> distrib;
unsigned long long random_seed = distrib(gen);
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
        if (world_rank == 0) GTEST_SKIP_("Skipped: unsupported memory type / location combination.");
return;
}
auto csr_row_ptr_desc = params.get_csr_row_ptr_desc();
auto csr_col_ptr_desc = params.get_csr_col_ptr_desc();
auto csr_weight_ptr_desc = params.get_csr_weight_ptr_desc();
auto center_node_desc = params.get_center_node_desc();
auto output_sample_offset_desc = params.get_output_sample_offset_desc();
auto max_sample_count = params.get_max_sample_count();
int64_t graph_node_count = params.get_graph_node_count();
int64_t graph_edge_count = params.get_graph_edge_count();
size_t center_node_size = wholememory_get_memory_size_from_array(¢er_node_desc);
size_t output_sample_offset_size =
wholememory_get_memory_size_from_array(&output_sample_offset_desc);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
void *host_ref_output_sample_offset, *host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id, *host_ref_output_global_edge_id;
void *host_center_nodes, *host_output_sample_offset, *host_output_dest_nodes,
*host_output_center_nodes_local_id, *host_output_global_edge_id;
void *dev_center_nodes, *dev_output_sample_offset;
wholememory_handle_t csr_row_ptr_memory_handle;
wholememory_handle_t csr_col_ptr_memory_handle;
wholememory_handle_t csr_weight_ptr_memory_handle;
EXPECT_EQ(wholememory_malloc(&csr_row_ptr_memory_handle,
wholememory_get_memory_size_from_array(&csr_row_ptr_desc),
wm_comm,
params.memory_type,
params.memory_location,
wholememory_dtype_get_element_size(csr_row_ptr_desc.dtype)),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_malloc(&csr_col_ptr_memory_handle,
wholememory_get_memory_size_from_array(&csr_col_ptr_desc),
wm_comm,
params.memory_type,
params.memory_location,
wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype)),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_malloc(&csr_weight_ptr_memory_handle,
wholememory_get_memory_size_from_array(&csr_weight_ptr_desc),
wm_comm,
params.memory_type,
params.memory_location,
wholememory_dtype_get_element_size(csr_weight_ptr_desc.dtype)),
WHOLEMEMORY_SUCCESS);
wholegraph_ops::testing::copy_host_array_to_wholememory(
host_csr_row_ptr, csr_row_ptr_memory_handle, csr_row_ptr_desc, stream);
wholegraph_ops::testing::copy_host_array_to_wholememory(
host_csr_col_ptr, csr_col_ptr_memory_handle, csr_col_ptr_desc, stream);
wholegraph_ops::testing::copy_host_array_to_wholememory(
host_csr_weight_ptr, csr_weight_ptr_memory_handle, csr_weight_ptr_desc, stream);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_center_nodes, center_node_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_output_sample_offset, output_sample_offset_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_center_nodes, center_node_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_output_sample_offset, output_sample_offset_size), cudaSuccess);
wholegraph_ops::testing::host_random_init_array(
host_center_nodes, center_node_desc, 0, graph_node_count - 1);
EXPECT_EQ(cudaMemcpyAsync(dev_center_nodes,
host_center_nodes,
wholememory_get_memory_size_from_array(¢er_node_desc),
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
wholememory_tensor_t wm_csr_row_ptr_tensor, wm_csr_col_ptr_tensor, wm_csr_weight_ptr_tensor;
wholememory_tensor_description_t wm_csr_row_ptr_tensor_desc, wm_csr_col_ptr_tensor_desc,
wm_csr_weight_ptr_tensor_desc;
wholememory_copy_array_desc_to_tensor(&wm_csr_row_ptr_tensor_desc, &csr_row_ptr_desc);
wholememory_copy_array_desc_to_tensor(&wm_csr_col_ptr_tensor_desc, &csr_col_ptr_desc);
wholememory_copy_array_desc_to_tensor(&wm_csr_weight_ptr_tensor_desc, &csr_weight_ptr_desc);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&wm_csr_row_ptr_tensor, csr_row_ptr_memory_handle, &wm_csr_row_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&wm_csr_col_ptr_tensor, csr_col_ptr_memory_handle, &wm_csr_col_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(
wholememory_make_tensor_from_handle(
&wm_csr_weight_ptr_tensor, csr_weight_ptr_memory_handle, &wm_csr_weight_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t center_nodes_tensor, output_sample_offset_tensor;
wholememory_tensor_description_t center_nodes_tensor_desc, output_sample_offset_tensor_desc;
wholememory_copy_array_desc_to_tensor(¢er_nodes_tensor_desc, ¢er_node_desc);
wholememory_copy_array_desc_to_tensor(&output_sample_offset_tensor_desc,
&output_sample_offset_desc);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
¢er_nodes_tensor, dev_center_nodes, ¢er_nodes_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(&output_sample_offset_tensor,
dev_output_sample_offset,
&output_sample_offset_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_env_func_t* default_env_func = wholememory::get_default_env_func();
wholememory::default_memory_context_t output_dest_mem_ctx, output_center_localid_mem_ctx,
output_edge_gid_mem_ctx;
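      // The sampling op allocates the three output buffers (dest nodes, center-node local ids and
      // global edge ids) through default_env_func; they are released via free_fn further below.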
EXPECT_EQ(wholegraph_csr_weighted_sample_without_replacement(wm_csr_row_ptr_tensor,
wm_csr_col_ptr_tensor,
wm_csr_weight_ptr_tensor,
center_nodes_tensor,
max_sample_count,
output_sample_offset_tensor,
&output_dest_mem_ctx,
&output_center_localid_mem_ctx,
&output_edge_gid_mem_ctx,
random_seed,
default_env_func,
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
EXPECT_EQ(output_dest_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_center_localid_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_edge_gid_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_dest_mem_ctx.desc.dtype, csr_col_ptr_desc.dtype);
EXPECT_EQ(output_center_localid_mem_ctx.desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_edge_gid_mem_ctx.desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_dest_mem_ctx.desc.sizes[0], output_center_localid_mem_ctx.desc.sizes[0]);
EXPECT_EQ(output_dest_mem_ctx.desc.sizes[0], output_edge_gid_mem_ctx.desc.sizes[0]);
int64_t total_sample_count = output_dest_mem_ctx.desc.sizes[0];
host_output_dest_nodes =
malloc(total_sample_count * wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype));
host_output_center_nodes_local_id = malloc(total_sample_count * sizeof(int));
host_output_global_edge_id = malloc(total_sample_count * sizeof(int64_t));
EXPECT_EQ(cudaMemcpyAsync(host_output_sample_offset,
dev_output_sample_offset,
output_sample_offset_size,
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(
host_output_dest_nodes,
output_dest_mem_ctx.ptr,
total_sample_count * wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_output_center_nodes_local_id,
output_center_localid_mem_ctx.ptr,
total_sample_count * sizeof(int),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_output_global_edge_id,
output_edge_gid_mem_ctx.ptr,
total_sample_count * sizeof(int64_t),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
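      // Within each center node's segment the GPU kernel and the CPU reference may emit sampled
      // neighbors in different orders, so both outputs are segment-sorted before comparison.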
wholegraph_ops::testing::segment_sort_output(
host_output_sample_offset,
output_sample_offset_desc,
host_output_dest_nodes,
wholememory_create_array_desc(total_sample_count, 0, csr_col_ptr_desc.dtype),
host_output_global_edge_id,
wholememory_create_array_desc(total_sample_count, 0, WHOLEMEMORY_DT_INT64));
int host_total_sample_count;
wholegraph_ops::testing::wholegraph_csr_weighted_sample_without_replacement_cpu(
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_csr_weight_ptr,
csr_weight_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
&host_ref_output_sample_offset,
output_sample_offset_desc,
&host_ref_output_dest_nodes,
&host_ref_output_center_nodes_local_id,
&host_ref_output_global_edge_id,
&host_total_sample_count,
random_seed);
EXPECT_EQ(total_sample_count, host_total_sample_count);
wholegraph_ops::testing::segment_sort_output(
host_ref_output_sample_offset,
output_sample_offset_desc,
host_ref_output_dest_nodes,
wholememory_create_array_desc(host_total_sample_count, 0, csr_col_ptr_desc.dtype),
host_ref_output_global_edge_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT64));
wholegraph_ops::testing::host_check_two_array_same(host_output_sample_offset,
output_sample_offset_desc,
host_ref_output_sample_offset,
output_sample_offset_desc);
wholegraph_ops::testing::host_check_two_array_same(
host_output_dest_nodes,
wholememory_create_array_desc(host_total_sample_count, 0, csr_col_ptr_desc.dtype),
host_ref_output_dest_nodes,
wholememory_create_array_desc(host_total_sample_count, 0, csr_col_ptr_desc.dtype));
wholegraph_ops::testing::host_check_two_array_same(
host_output_center_nodes_local_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT),
host_ref_output_center_nodes_local_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT));
wholegraph_ops::testing::host_check_two_array_same(
host_output_global_edge_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT64),
host_ref_output_global_edge_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT64));
(default_env_func->output_fns).free_fn(&output_dest_mem_ctx, nullptr);
(default_env_func->output_fns).free_fn(&output_center_localid_mem_ctx, nullptr);
(default_env_func->output_fns).free_fn(&output_edge_gid_mem_ctx, nullptr);
if (host_ref_output_sample_offset != nullptr) free(host_ref_output_sample_offset);
if (host_ref_output_dest_nodes != nullptr) free(host_ref_output_dest_nodes);
if (host_ref_output_center_nodes_local_id != nullptr)
free(host_ref_output_center_nodes_local_id);
      if (host_ref_output_global_edge_id != nullptr) free(host_ref_output_global_edge_id);
      if (host_output_dest_nodes != nullptr) free(host_output_dest_nodes);
      if (host_output_center_nodes_local_id != nullptr) free(host_output_center_nodes_local_id);
      if (host_output_global_edge_id != nullptr) free(host_output_global_edge_id);
EXPECT_EQ(cudaFreeHost(host_center_nodes), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_output_sample_offset), cudaSuccess);
EXPECT_EQ(cudaFree(dev_center_nodes), cudaSuccess);
EXPECT_EQ(cudaFree(dev_output_sample_offset), cudaSuccess);
EXPECT_EQ(wholememory_free(csr_row_ptr_memory_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_free(csr_col_ptr_memory_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_free(csr_weight_ptr_memory_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
if (host_csr_row_ptr != nullptr) free(host_csr_row_ptr);
if (host_csr_col_ptr != nullptr) free(host_csr_col_ptr);
if (host_csr_weight_ptr != nullptr) free(host_csr_weight_ptr);
}
INSTANTIATE_TEST_SUITE_P(WholeGraphCSRWeightedSampleWithoutReplacementOpTests,
WholeGraphCSRWeightedSampleWithoutReplacementParameterTests,
::testing::Values(WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_max_sample_count(10)
.set_center_node_count(35)
.set_graph_node_count(23289)
.set_graph_edge_couont(689403),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_max_sample_count(300)
.set_center_node_count(256)
.set_graph_node_count(23200)
.set_graph_edge_couont(68940300),
WholeGraphCSRWeightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_center_node_type(WHOLEMEMORY_DT_INT64)));
| 0 |
rapidsai_public_repos/wholegraph/cpp/tests
|
rapidsai_public_repos/wholegraph/cpp/tests/wholegraph_ops/wholegraph_csr_unweighted_sample_without_replacement_tests.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <random>
#include <wholememory/tensor_description.h>
#include <wholememory/wholegraph_op.h>
#include <wholememory/wholememory.h>
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "wholememory/initialize.hpp"
#include "../wholememory/wholememory_test_utils.hpp"
#include "graph_sampling_test_utils.hpp"
typedef struct WholeGraphCSRUnweightedSampleWithoutReplacementTestParam {
wholememory_array_description_t get_csr_row_ptr_desc() const
{
return wholememory_create_array_desc(graph_node_count + 1, 0, csr_row_ptr_dtype);
}
wholememory_array_description_t get_csr_col_ptr_desc() const
{
return wholememory_create_array_desc(graph_edge_count, 0, csr_col_ptr_dtype);
}
wholememory_array_description_t get_center_node_desc() const
{
return wholememory_create_array_desc(center_node_count, 0, center_node_dtype);
}
wholememory_array_description_t get_output_sample_offset_desc() const
{
return wholememory_create_array_desc(center_node_count + 1, 0, output_sample_offset_dtype);
}
int64_t get_graph_node_count() const { return graph_node_count; }
int64_t get_graph_edge_count() const { return graph_edge_count; }
int64_t get_max_sample_count() const { return max_sample_count; }
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_memory_type(
wholememory_memory_type_t new_memory_type)
{
memory_type = new_memory_type;
return *this;
};
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_memory_location(
wholememory_memory_location_t new_memory_location)
{
memory_location = new_memory_location;
return *this;
};
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_max_sample_count(
int new_sample_count)
{
max_sample_count = new_sample_count;
return *this;
}
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_center_node_count(
int new_center_node_count)
{
center_node_count = new_center_node_count;
return *this;
}
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_graph_node_count(
int new_graph_node_count)
{
graph_node_count = new_graph_node_count;
return *this;
}
  WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_graph_edge_count(
int new_graph_edge_count)
{
graph_edge_count = new_graph_edge_count;
return *this;
}
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam& set_center_node_type(
wholememory_dtype_t new_center_node_dtype)
{
center_node_dtype = new_center_node_dtype;
return *this;
}
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_DEVICE;
int64_t max_sample_count = 50;
int64_t center_node_count = 512;
int64_t graph_node_count = 9703LL;
int64_t graph_edge_count = 104323L;
wholememory_dtype_t csr_row_ptr_dtype = WHOLEMEMORY_DT_INT64;
wholememory_dtype_t csr_col_ptr_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t center_node_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_sample_offset_dtype = WHOLEMEMORY_DT_INT;
wholememory_dtype_t output_dest_node_dtype = center_node_dtype;
wholememory_dtype_t output_center_node_local_id_dtype = WHOLEMEMORY_DT_INT;
  wholememory_dtype_t output_global_edge_id_dtype = WHOLEMEMORY_DT_INT64;
} WholeGraphCSRUnweightedSampleWithoutReplacementTestParam;
class WholeGraphCSRUnweightedSampleWithoutReplacementParameterTests
: public ::testing::TestWithParam<WholeGraphCSRUnweightedSampleWithoutReplacementTestParam> {};
TEST_P(WholeGraphCSRUnweightedSampleWithoutReplacementParameterTests, UnWeightedSampleTest)
{
auto params = GetParam();
int dev_count = ForkGetDeviceCount();
EXPECT_GE(dev_count, 1);
std::vector<std::array<int, 2>> pipes;
CreatePipes(&pipes, dev_count);
auto graph_node_count = params.get_graph_node_count();
auto graph_edge_count = params.get_graph_edge_count();
auto graph_csr_row_ptr_desc = params.get_csr_row_ptr_desc();
auto graph_csr_col_ptr_desc = params.get_csr_col_ptr_desc();
void* host_csr_row_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&graph_csr_row_ptr_desc));
void* host_csr_col_ptr =
(void*)malloc(wholememory_get_memory_size_from_array(&graph_csr_col_ptr_desc));
wholegraph_ops::testing::gen_csr_graph(graph_node_count,
graph_edge_count,
host_csr_row_ptr,
graph_csr_row_ptr_desc,
host_csr_col_ptr,
graph_csr_col_ptr_desc);
MultiProcessRun(
dev_count,
[¶ms, &pipes, host_csr_row_ptr, host_csr_col_ptr](int world_rank, int world_size) {
thread_local std::random_device rd;
thread_local std::mt19937 gen(rd());
thread_local std::uniform_int_distribution<unsigned long long> distrib;
unsigned long long random_seed = distrib(gen);
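      // Each rank draws its own random seed and passes it to both the GPU sampling op and the
      // CPU reference below, so the two produce comparable results for that rank.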
EXPECT_EQ(wholememory_init(0), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
wholememory_comm_t wm_comm = create_communicator_by_pipes(pipes, world_rank, world_size);
if (wholememory_communicator_support_type_location(
wm_comm, params.memory_type, params.memory_location) != WHOLEMEMORY_SUCCESS) {
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
if (world_rank == 0) GTEST_SKIP_("Skip due to not supported.");
return;
}
auto csr_row_ptr_desc = params.get_csr_row_ptr_desc();
auto csr_col_ptr_desc = params.get_csr_col_ptr_desc();
auto center_node_desc = params.get_center_node_desc();
auto output_sample_offset_desc = params.get_output_sample_offset_desc();
auto max_sample_count = params.get_max_sample_count();
int64_t graph_node_count = params.get_graph_node_count();
int64_t graph_edge_count = params.get_graph_edge_count();
size_t center_node_size = wholememory_get_memory_size_from_array(¢er_node_desc);
size_t output_sample_offset_size =
wholememory_get_memory_size_from_array(&output_sample_offset_desc);
cudaStream_t stream;
EXPECT_EQ(cudaStreamCreate(&stream), cudaSuccess);
void *host_ref_output_sample_offset, *host_ref_output_dest_nodes,
*host_ref_output_center_nodes_local_id, *host_ref_output_global_edge_id;
void *host_center_nodes, *host_output_sample_offset, *host_output_dest_nodes,
*host_output_center_nodes_local_id, *host_output_global_edge_id;
void *dev_center_nodes, *dev_output_sample_offset;
wholememory_handle_t csr_row_ptr_memory_handle;
wholememory_handle_t csr_col_ptr_memory_handle;
EXPECT_EQ(wholememory_malloc(&csr_row_ptr_memory_handle,
wholememory_get_memory_size_from_array(&csr_row_ptr_desc),
wm_comm,
params.memory_type,
params.memory_location,
wholememory_dtype_get_element_size(csr_row_ptr_desc.dtype)),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_malloc(&csr_col_ptr_memory_handle,
wholememory_get_memory_size_from_array(&csr_col_ptr_desc),
wm_comm,
params.memory_type,
params.memory_location,
wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype)),
WHOLEMEMORY_SUCCESS);
wholegraph_ops::testing::copy_host_array_to_wholememory(
host_csr_row_ptr, csr_row_ptr_memory_handle, csr_row_ptr_desc, stream);
wholegraph_ops::testing::copy_host_array_to_wholememory(
host_csr_col_ptr, csr_col_ptr_memory_handle, csr_col_ptr_desc, stream);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
EXPECT_EQ(cudaSetDevice(world_rank), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_center_nodes, center_node_size), cudaSuccess);
EXPECT_EQ(cudaMallocHost(&host_output_sample_offset, output_sample_offset_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_center_nodes, center_node_size), cudaSuccess);
EXPECT_EQ(cudaMalloc(&dev_output_sample_offset, output_sample_offset_size), cudaSuccess);
wholegraph_ops::testing::host_random_init_array(
host_center_nodes, center_node_desc, 0, graph_node_count - 1);
EXPECT_EQ(cudaMemcpyAsync(dev_center_nodes,
host_center_nodes,
wholememory_get_memory_size_from_array(¢er_node_desc),
cudaMemcpyHostToDevice,
stream),
cudaSuccess);
wholememory_tensor_t wm_csr_row_ptr_tensor, wm_csr_col_ptr_tensor;
wholememory_tensor_description_t wm_csr_row_ptr_tensor_desc, wm_csr_col_ptr_tensor_desc;
wholememory_copy_array_desc_to_tensor(&wm_csr_row_ptr_tensor_desc, &csr_row_ptr_desc);
wholememory_copy_array_desc_to_tensor(&wm_csr_col_ptr_tensor_desc, &csr_col_ptr_desc);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&wm_csr_row_ptr_tensor, csr_row_ptr_memory_handle, &wm_csr_row_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_handle(
&wm_csr_col_ptr_tensor, csr_col_ptr_memory_handle, &wm_csr_col_ptr_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_tensor_t center_nodes_tensor, output_sample_offset_tensor;
wholememory_tensor_description_t center_nodes_tensor_desc, output_sample_offset_tensor_desc;
wholememory_copy_array_desc_to_tensor(¢er_nodes_tensor_desc, ¢er_node_desc);
wholememory_copy_array_desc_to_tensor(&output_sample_offset_tensor_desc,
&output_sample_offset_desc);
EXPECT_EQ(wholememory_make_tensor_from_pointer(
¢er_nodes_tensor, dev_center_nodes, ¢er_nodes_tensor_desc),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_make_tensor_from_pointer(&output_sample_offset_tensor,
dev_output_sample_offset,
&output_sample_offset_tensor_desc),
WHOLEMEMORY_SUCCESS);
wholememory_env_func_t* default_env_func = wholememory::get_default_env_func();
wholememory::default_memory_context_t output_dest_mem_ctx, output_center_localid_mem_ctx,
output_edge_gid_mem_ctx;
EXPECT_EQ(wholegraph_csr_unweighted_sample_without_replacement(wm_csr_row_ptr_tensor,
wm_csr_col_ptr_tensor,
center_nodes_tensor,
max_sample_count,
output_sample_offset_tensor,
&output_dest_mem_ctx,
&output_center_localid_mem_ctx,
&output_edge_gid_mem_ctx,
random_seed,
default_env_func,
stream),
WHOLEMEMORY_SUCCESS);
EXPECT_EQ(cudaGetLastError(), cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
EXPECT_EQ(output_dest_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_center_localid_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_edge_gid_mem_ctx.desc.dim, 1);
EXPECT_EQ(output_dest_mem_ctx.desc.dtype, csr_col_ptr_desc.dtype);
EXPECT_EQ(output_center_localid_mem_ctx.desc.dtype, WHOLEMEMORY_DT_INT);
EXPECT_EQ(output_edge_gid_mem_ctx.desc.dtype, WHOLEMEMORY_DT_INT64);
EXPECT_EQ(output_dest_mem_ctx.desc.sizes[0], output_center_localid_mem_ctx.desc.sizes[0]);
EXPECT_EQ(output_dest_mem_ctx.desc.sizes[0], output_edge_gid_mem_ctx.desc.sizes[0]);
int64_t total_sample_count = output_dest_mem_ctx.desc.sizes[0];
host_output_dest_nodes =
malloc(total_sample_count * wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype));
host_output_center_nodes_local_id = malloc(total_sample_count * sizeof(int));
host_output_global_edge_id = malloc(total_sample_count * sizeof(int64_t));
EXPECT_EQ(cudaMemcpyAsync(host_output_sample_offset,
dev_output_sample_offset,
output_sample_offset_size,
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(
host_output_dest_nodes,
output_dest_mem_ctx.ptr,
total_sample_count * wholememory_dtype_get_element_size(csr_col_ptr_desc.dtype),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_output_center_nodes_local_id,
output_center_localid_mem_ctx.ptr,
total_sample_count * sizeof(int),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaMemcpyAsync(host_output_global_edge_id,
output_edge_gid_mem_ctx.ptr,
total_sample_count * sizeof(int64_t),
cudaMemcpyDeviceToHost,
stream),
cudaSuccess);
EXPECT_EQ(cudaStreamSynchronize(stream), cudaSuccess);
wholememory_communicator_barrier(wm_comm);
int host_total_sample_count;
wholegraph_ops::testing::wholegraph_csr_unweighted_sample_without_replacement_cpu(
host_csr_row_ptr,
csr_row_ptr_desc,
host_csr_col_ptr,
csr_col_ptr_desc,
host_center_nodes,
center_node_desc,
max_sample_count,
&host_ref_output_sample_offset,
output_sample_offset_desc,
&host_ref_output_dest_nodes,
&host_ref_output_center_nodes_local_id,
&host_ref_output_global_edge_id,
&host_total_sample_count,
random_seed);
EXPECT_EQ(total_sample_count, host_total_sample_count);
wholegraph_ops::testing::host_check_two_array_same(host_output_sample_offset,
output_sample_offset_desc,
host_ref_output_sample_offset,
output_sample_offset_desc);
wholegraph_ops::testing::host_check_two_array_same(
host_output_dest_nodes,
wholememory_create_array_desc(host_total_sample_count, 0, csr_col_ptr_desc.dtype),
host_ref_output_dest_nodes,
wholememory_create_array_desc(host_total_sample_count, 0, csr_col_ptr_desc.dtype));
wholegraph_ops::testing::host_check_two_array_same(
host_output_center_nodes_local_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT),
host_ref_output_center_nodes_local_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT));
wholegraph_ops::testing::host_check_two_array_same(
host_output_global_edge_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT64),
host_ref_output_global_edge_id,
wholememory_create_array_desc(host_total_sample_count, 0, WHOLEMEMORY_DT_INT64));
(default_env_func->output_fns).free_fn(&output_dest_mem_ctx, nullptr);
(default_env_func->output_fns).free_fn(&output_center_localid_mem_ctx, nullptr);
(default_env_func->output_fns).free_fn(&output_edge_gid_mem_ctx, nullptr);
EXPECT_EQ(wholememory_free(csr_row_ptr_memory_handle), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_free(csr_col_ptr_memory_handle), WHOLEMEMORY_SUCCESS);
if (host_ref_output_sample_offset != nullptr) free(host_ref_output_sample_offset);
if (host_ref_output_dest_nodes != nullptr) free(host_ref_output_dest_nodes);
if (host_ref_output_center_nodes_local_id != nullptr)
free(host_ref_output_center_nodes_local_id);
      if (host_ref_output_global_edge_id != nullptr) free(host_ref_output_global_edge_id);
      if (host_output_dest_nodes != nullptr) free(host_output_dest_nodes);
      if (host_output_center_nodes_local_id != nullptr) free(host_output_center_nodes_local_id);
      if (host_output_global_edge_id != nullptr) free(host_output_global_edge_id);
EXPECT_EQ(cudaFreeHost(host_center_nodes), cudaSuccess);
EXPECT_EQ(cudaFreeHost(host_output_sample_offset), cudaSuccess);
EXPECT_EQ(cudaFree(dev_center_nodes), cudaSuccess);
EXPECT_EQ(cudaFree(dev_output_sample_offset), cudaSuccess);
EXPECT_EQ(wholememory::destroy_all_communicators(), WHOLEMEMORY_SUCCESS);
EXPECT_EQ(wholememory_finalize(), WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK(::testing::Test::HasFailure() == false);
},
true);
if (host_csr_row_ptr != nullptr) free(host_csr_row_ptr);
if (host_csr_col_ptr != nullptr) free(host_csr_col_ptr);
}
INSTANTIATE_TEST_SUITE_P(
WholeGraphCSRUnweightedSampleWithoutReplacementOpTests,
WholeGraphCSRUnweightedSampleWithoutReplacementParameterTests,
::testing::Values(WholeGraphCSRUnweightedSampleWithoutReplacementTestParam().set_memory_type(
WHOLEMEMORY_MT_CONTINUOUS),
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam().set_memory_type(
WHOLEMEMORY_MT_CHUNKED),
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_memory_location(WHOLEMEMORY_ML_HOST),
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CONTINUOUS)
.set_max_sample_count(10)
.set_center_node_count(35)
.set_graph_node_count(23289)
                      .set_graph_edge_count(689403),
WholeGraphCSRUnweightedSampleWithoutReplacementTestParam()
.set_memory_type(WHOLEMEMORY_MT_CHUNKED)
.set_center_node_type(WHOLEMEMORY_DT_INT64)));
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/bench/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# option(BUILD_BENCHMARKS "Build wholegraph C++ benchmark tests" ON)
message(VERBOSE "WHOLEGRAPH: Building wholegraph C++ benchmarks: ${BUILD_BENCHMARKS}")
function(ConfigureBench)
set(options OPTIONAL)
set(oneValueArgs NAME)
set(multiValueArgs PATH TARGETS CONFIGURATIONS)
cmake_parse_arguments(ConfigureBench "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(BENCH_NAME ${ConfigureBench_NAME})
add_executable(${BENCH_NAME} ${ConfigureBench_PATH})
target_include_directories(${BENCH_NAME} PRIVATE "$<BUILD_INTERFACE:${WHOLEGRAPH_SOURCE_DIR}>/src")
target_link_libraries(
${BENCH_NAME}
PRIVATE wholegraph
raft::raft
rmm::rmm
pthread
)
set_target_properties(
${BENCH_NAME}
PROPERTIES # set target compile options
INSTALL_RPATH "\$ORIGIN/../../../lib"
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}"
POSITION_INDEPENDENT_CODE ON
RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${WHOLEGRAPH_BINARY_DIR}/gbench>"
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(${BENCH_NAME} PUBLIC $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:-Wall -Werror
-Wno-error=deprecated-declarations>)
install(
TARGETS ${BENCH_NAME}
COMPONENT testing
DESTINATION bin/gbench/libwholegraph
EXCLUDE_FROM_ALL
)
endfunction()
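# Illustrative only: an additional benchmark would be registered the same way, e.g.
#   ConfigureBench(
#     NAME MY_NEW_BENCH
#     PATH wholememory_ops/my_new_bench.cu
#          common/wholegraph_benchmark.cpp
#   )
# (MY_NEW_BENCH and my_new_bench.cu are placeholder names.)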
if(BUILD_BENCHMARKS)
ConfigureBench(
NAME GATHER_SCATTER_BENCH
PATH wholememory_ops/gather_scatter_bench.cu
common/wholegraph_benchmark.cpp
)
endif()
| 0 |
rapidsai_public_repos/wholegraph/cpp/bench
|
rapidsai_public_repos/wholegraph/cpp/bench/wholememory_ops/gather_scatter_bench.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <getopt.h>
#include <sys/time.h>
#include <unistd.h>
#include <iostream>
#include <string>
#include <string_view>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_op.h>
#include "../common/wholegraph_benchmark.hpp"
#include "parallel_utils.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/env_func_ptrs.hpp"
#include "wholememory/initialize.hpp"
#include "../../tests/wholememory/wholememory_test_utils.hpp"
namespace wholegraph::bench::gather_scatter {
typedef struct GatherScatterBenchParam {
wholememory_matrix_description_t get_embedding_desc() const
{
int64_t embedding_entry_count = get_embedding_entry_count();
int64_t matrix_sizes[2] = {embedding_entry_count, embedding_dim};
return wholememory_create_matrix_desc(
matrix_sizes, embedding_stride, embedding_storage_offset, embedding_type);
}
wholememory_array_description_t get_indices_desc() const
{
int64_t indices_count = get_indices_count();
return wholememory_create_array_desc(indices_count, indices_storage_offset, indices_type);
}
wholememory_matrix_description_t get_output_desc() const
{
int64_t indices_count = get_indices_count();
int64_t output_sizes[2] = {indices_count, embedding_dim};
return wholememory_create_matrix_desc(
output_sizes, output_stride, output_storage_offset, output_type);
}
int64_t get_embedding_granularity() const
{
return embedding_stride * wholememory_dtype_get_element_size(embedding_type);
}
int64_t get_embedding_table_size() const { return embedding_table_size; }
int64_t get_gather_size() const { return gather_size; }
wholememory_memory_type_t get_memory_type() const { return memory_type; }
wholememory_memory_location_t get_memory_location() const { return memory_location; }
int get_loop_count() const { return loop_count; }
std::string get_test_type() const { return test_type; }
std::string get_server_addr() const { return server_addr; }
int get_server_port() const { return server_port; }
int get_node_rank() const { return node_rank; }
int get_node_size() const { return node_size; }
int get_num_gpu() const { return num_gpu; }
int64_t get_embedding_dim() const { return embedding_dim; }
wholememory_dtype_t get_embedding_type() const { return embedding_type; }
GatherScatterBenchParam& set_memory_type(wholememory_memory_type_t new_memory_type)
{
memory_type = new_memory_type;
return *this;
}
GatherScatterBenchParam& set_memory_location(wholememory_memory_location_t new_memory_location)
{
memory_location = new_memory_location;
return *this;
}
GatherScatterBenchParam& set_embedding_table_size(int64_t new_embedding_table_size)
{
int64_t entry_size = wholememory_dtype_get_element_size(embedding_type) * get_embedding_dim();
embedding_table_size = (new_embedding_table_size + entry_size - 1) / entry_size * entry_size;
return *this;
}
GatherScatterBenchParam& set_gather_size(int64_t new_gather_size)
{
int64_t entry_size = wholememory_dtype_get_element_size(embedding_type) * get_embedding_dim();
gather_size = (new_gather_size + entry_size - 1) / entry_size * entry_size;
return *this;
}
GatherScatterBenchParam& set_embedding_dim(int64_t new_embedding_dim)
{
embedding_dim = new_embedding_dim;
if (embedding_stride != embedding_dim) embedding_stride = embedding_dim;
if (output_stride != embedding_dim) output_stride = embedding_dim;
int64_t entry_size = wholememory_dtype_get_element_size(embedding_type) * embedding_dim;
embedding_table_size = (embedding_table_size + entry_size - 1) / entry_size * entry_size;
gather_size = (gather_size + entry_size - 1) / entry_size * entry_size;
return *this;
}
GatherScatterBenchParam& set_loop_count(int new_loop_count)
{
loop_count = new_loop_count;
return *this;
}
GatherScatterBenchParam& set_test_type(std::string new_test_type)
{
test_type = new_test_type;
return *this;
}
GatherScatterBenchParam& set_server_addr(std::string new_server_addr)
{
server_addr = new_server_addr;
return *this;
}
GatherScatterBenchParam& set_server_port(int new_server_port)
{
server_port = new_server_port;
return *this;
}
GatherScatterBenchParam& set_node_rank(int new_node_rank)
{
node_rank = new_node_rank;
return *this;
}
GatherScatterBenchParam& set_node_size(int new_node_size)
{
node_size = new_node_size;
return *this;
}
GatherScatterBenchParam& set_num_gpu(int new_num_gpu)
{
num_gpu = new_num_gpu;
return *this;
}
private:
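  // embedding_table_size and gather_size are interpreted as byte counts; the helpers below
  // convert them to entry/index counts using the element size and embedding_dim.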
int64_t get_embedding_entry_count() const
{
return embedding_table_size / wholememory_dtype_get_element_size(embedding_type) /
embedding_dim;
}
int64_t get_indices_count() const
{
return gather_size / wholememory_dtype_get_element_size(embedding_type) / embedding_dim;
}
GatherScatterBenchParam& set_embedding_stride(int64_t new_embedding_stride)
{
embedding_stride = new_embedding_stride;
return *this;
}
GatherScatterBenchParam& set_output_stride(int64_t new_output_stride)
{
output_stride = new_output_stride;
return *this;
}
GatherScatterBenchParam& set_embedding_type(wholememory_dtype_t new_embedding_type)
{
embedding_type = new_embedding_type;
return *this;
}
GatherScatterBenchParam& set_indices_type(wholememory_dtype_t new_indices_type)
{
indices_type = new_indices_type;
return *this;
}
GatherScatterBenchParam& set_output_type(wholememory_dtype_t new_output_type)
{
output_type = new_output_type;
return *this;
}
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_CHUNKED;
wholememory_memory_location_t memory_location = WHOLEMEMORY_ML_DEVICE;
int64_t embedding_table_size = 1024000LL;
int64_t gather_size = 1024;
int64_t embedding_dim = 32;
int loop_count = 20;
std::string test_type = "gather"; // gather or scatter
std::string server_addr = "localhost";
int server_port = 24987;
int node_rank = 0;
int node_size = 1;
int num_gpu = 0;
int64_t embedding_stride = 32;
int64_t output_stride = 32;
wholememory_dtype_t embedding_type = WHOLEMEMORY_DT_FLOAT;
wholememory_dtype_t indices_type = WHOLEMEMORY_DT_INT64;
wholememory_dtype_t output_type = WHOLEMEMORY_DT_FLOAT;
int64_t embedding_storage_offset = 0;
int64_t indices_storage_offset = 0;
int64_t output_storage_offset = 0;
} GatherScatterBenchParam;
std::string get_memory_type_string(wholememory_memory_type_t memory_type)
{
std::string str;
switch (memory_type) {
case WHOLEMEMORY_MT_NONE: str = "WHOLEMEMORY_MT_NONE"; break;
case WHOLEMEMORY_MT_CONTINUOUS: str = "WHOLEMEMORY_MT_CONTINUOUS"; break;
case WHOLEMEMORY_MT_CHUNKED: str = "WHOLEMEMORY_MT_CHUNKED"; break;
case WHOLEMEMORY_MT_DISTRIBUTED: str = "WHOLEMEMORY_MT_DISTRIBUTED"; break;
default: break;
}
return str;
}
std::string get_memory_location_string(wholememory_memory_location_t memory_location)
{
std::string str;
switch (memory_location) {
case WHOLEMEMORY_ML_NONE: str = "WHOLEMEMORY_ML_NONE"; break;
case WHOLEMEMORY_ML_DEVICE: str = "WHOLEMEMORY_ML_DEVICE"; break;
case WHOLEMEMORY_ML_HOST: str = "WHOLEMEMORY_ML_HOST"; break;
default: break;
}
return str;
}
void gather_scatter_benchmark(GatherScatterBenchParam& params)
{
int g_dev_count = ForkGetDeviceCount();
WHOLEMEMORY_CHECK_NOTHROW(g_dev_count >= 1);
if (params.get_num_gpu() == 0) { params.set_num_gpu(g_dev_count); }
MultiProcessRun(
g_dev_count,
[¶ms](int local_rank, int local_size) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_init(0) == WHOLEMEMORY_SUCCESS);
WM_CUDA_CHECK_NO_THROW(cudaSetDevice(local_rank));
int world_size = local_size * params.get_node_size();
int world_rank = params.get_node_rank() * params.get_num_gpu() + local_rank;
SideBandCommunicator* side_band_communicator = StartSidebandCommunicator(
world_rank, world_size, params.get_server_addr().c_str(), params.get_server_port());
wholememory_comm_t wm_comm =
create_communicator_by_socket(side_band_communicator, world_rank, world_size);
ShutDownSidebandCommunicator(side_band_communicator);
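      // The socket-based sideband server is only needed to bootstrap the WholeMemory
      // communicator; afterwards ranks synchronize through wholememory_communicator_barrier.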
auto embedding_desc = params.get_embedding_desc();
auto indices_desc = params.get_indices_desc();
auto output_desc = params.get_output_desc();
std::string test_type = params.get_test_type();
size_t embedding_entry_size = params.get_embedding_granularity();
wholememory_tensor_t embedding_tensor;
wholememory_tensor_description_t embedding_tensor_desc;
wholememory_copy_matrix_desc_to_tensor(&embedding_tensor_desc, &embedding_desc);
WHOLEMEMORY_CHECK_NOTHROW(wholememory_create_tensor(&embedding_tensor,
&embedding_tensor_desc,
wm_comm,
params.get_memory_type(),
params.get_memory_location()) ==
WHOLEMEMORY_SUCCESS);
cudaStream_t stream;
WM_CUDA_CHECK_NO_THROW(cudaStreamCreate(&stream));
void *dev_indices = nullptr, *dev_gather_buffer = nullptr;
void* host_indices = nullptr;
size_t gather_buffer_size = params.get_gather_size();
size_t indices_buffer_size = wholememory_get_memory_size_from_array(&indices_desc);
WM_CUDA_CHECK_NO_THROW(cudaMallocHost(&host_indices, indices_buffer_size));
WM_CUDA_CHECK_NO_THROW(cudaMalloc(&dev_indices, indices_buffer_size));
WM_CUDA_CHECK_NO_THROW(cudaMalloc(&dev_gather_buffer, gather_buffer_size));
wholegraph::bench::host_random_init_integer_indices(
host_indices, indices_desc, embedding_desc.sizes[0]);
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(dev_indices,
host_indices,
wholememory_get_memory_size_from_array(&indices_desc),
cudaMemcpyHostToDevice,
stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
WHOLEMEMORY_CHECK_NOTHROW(wholememory_communicator_barrier(wm_comm) == WHOLEMEMORY_SUCCESS);
wholememory_tensor_t indices_tensor, output_tensor;
wholememory_tensor_description_t indices_tensor_desc, output_tensor_desc;
wholememory_copy_array_desc_to_tensor(&indices_tensor_desc, &indices_desc);
wholememory_copy_matrix_desc_to_tensor(&output_tensor_desc, &output_desc);
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_make_tensor_from_pointer(&indices_tensor, dev_indices, &indices_tensor_desc) ==
WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK_NOTHROW(wholememory_make_tensor_from_pointer(
&output_tensor, dev_gather_buffer, &output_tensor_desc) ==
WHOLEMEMORY_SUCCESS);
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
WHOLEMEMORY_CHECK_NOTHROW(wholememory_communicator_barrier(wm_comm) == WHOLEMEMORY_SUCCESS);
const auto barrier_fn = [&wm_comm]() -> void {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_communicator_barrier(wm_comm) == WHOLEMEMORY_SUCCESS);
};
double emb_size_mb = (double)params.get_embedding_table_size() / 1024.0 / 1024.0;
double gather_size_mb = (double)params.get_gather_size() / 1024.0 / 1024.0;
if (local_rank == 0) {
printf(
"%s, world_size=%d, memoryType=%s, memoryLocation=%s, elt_size=%ld, embeddingDim=%ld, "
"embeddingTableSize=%.2lf MB, gatherSize=%.2lf MB\n",
test_type.c_str(),
world_size,
get_memory_type_string(params.get_memory_type()).c_str(),
get_memory_location_string(params.get_memory_location()).c_str(),
wholememory_dtype_get_element_size(params.get_embedding_type()),
params.get_embedding_dim(),
emb_size_mb,
gather_size_mb);
}
PerformanceMeter meter;
meter.AddMetrics("Bandwidth", "GB/s", gather_buffer_size / 1000.0 / 1000.0 / 1000.0, false)
.SetMaxRunSeconds(1000)
.SetRunCount(params.get_loop_count());
if (test_type.compare("gather") == 0) {
MultiProcessMeasurePerformance(
[&] {
wholememory_gather(embedding_tensor,
indices_tensor,
output_tensor,
wholememory::get_cached_env_func(),
stream);
},
wm_comm,
meter,
barrier_fn);
} else if (test_type.compare("scatter") == 0) {
MultiProcessMeasurePerformance(
[&] {
wholememory_scatter(output_tensor,
indices_tensor,
embedding_tensor,
wholememory::get_cached_env_func(),
stream);
},
wm_comm,
meter,
barrier_fn);
} else {
printf("Invalid test function, should be: gather or scatter\n");
exit(EXIT_FAILURE);
}
wholememory::drop_cached_env_func_cache();
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(indices_tensor) == WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(output_tensor) == WHOLEMEMORY_SUCCESS);
WM_CUDA_CHECK_NO_THROW(cudaFreeHost(host_indices));
WM_CUDA_CHECK_NO_THROW(cudaFree(dev_indices));
WM_CUDA_CHECK_NO_THROW(cudaFree(dev_gather_buffer));
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(embedding_tensor) ==
WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK_NOTHROW(wholememory::destroy_all_communicators() == WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK_NOTHROW(wholememory_finalize() == WHOLEMEMORY_SUCCESS);
},
true);
}
} // namespace wholegraph::bench::gather_scatter
int main(int argc, char** argv)
{
wholegraph::bench::gather_scatter::GatherScatterBenchParam params;
const char* optstr = "ht:l:e:g:d:c:f:a:p:r:s:n:";
struct option opts[] = {
{"help", no_argument, NULL, 'h'},
{"memory_type",
required_argument,
NULL,
't'}, // 0: None, 1: Continuous, 2: Chunked, 3 Distributed
{"memory_location", required_argument, NULL, 'l'}, // 0: None, 1: Device, 2: Host
{"embedding_table_size", required_argument, NULL, 'e'},
{"gather_size", required_argument, NULL, 'g'},
{"embedding_dim", required_argument, NULL, 'd'},
{"loop_count", required_argument, NULL, 'c'},
{"test_type", required_argument, NULL, 'f'}, // test_type: gather or scatter
{"node_rank", required_argument, NULL, 'r'}, // node_rank
{"node_size", required_argument, NULL, 's'}, // node_size
{"num_gpu", required_argument, NULL, 'n'}, // num gpu per node
{"server_addr", required_argument, NULL, 'a'}, // server_addr
{"server_port", required_argument, NULL, 'p'} // server_port
};
const char* usage =
"Usage: %s [options]\n"
"Options:\n"
" -h, --help display this help and exit\n"
" -t, --memory_type specify wholememory type, 0: None, 1: Continuous, 2: Chunked, 3: "
"Distributed\n"
" -l, --memory_location specify wholememory location, 0: None, 1: Device, 2: Host\n"
" -e, --embedding_table_size specify embedding table size\n"
" -g, --gather_size specify gather size\n"
" -d, --embedding_dim specify embedding dimension\n"
" -c, --loop_count specify loop count\n"
" -f, --test_type specify test type: gather or scatter\n"
" -r, --node_rank node_rank of current process\n"
" -s, --node_size node_size or process count\n"
" -n, --num_gpu num_gpu per process\n"
" -a, --server_addr specify sideband server address\n"
" -p, --server_port specify sideband server port\n";
int c;
bool has_option = false;
while ((c = getopt_long(argc, argv, optstr, opts, NULL)) != -1) {
has_option = true;
switch (c) {
char* endptr;
long val;
case 'h': printf(usage, argv[0]); exit(EXIT_SUCCESS);
case 't':
val = strtol(optarg, &endptr, 10);
if (*endptr != '\0' || val < 0 || val > 3) {
printf("Invalid argument for option -t\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_memory_type(static_cast<wholememory_memory_type_t>(val));
break;
case 'l':
val = strtol(optarg, &endptr, 10);
if (*endptr != '\0' || val < 0 || val > 2) {
printf("Invalid argument for option -l\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_memory_location(static_cast<wholememory_memory_location_t>(val));
break;
case 'e':
val = std::stoll(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -e\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_embedding_table_size(val);
break;
case 'g':
val = std::stoll(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -g\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_gather_size(val);
break;
case 'd':
val = std::stoll(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -d\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_embedding_dim(val);
break;
case 'c':
val = std::stoi(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -c\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_loop_count(val);
break;
case 'f':
if (strcmp(optarg, "gather") == 0) {
params.set_test_type("gather");
} else if (strcmp(optarg, "scatter") == 0) {
params.set_test_type("scatter");
} else {
printf("Invalid argument for option -f\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
break;
case 'a': params.set_server_addr(optarg); break;
case 'p':
val = std::atoi(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -p\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_server_port(val);
break;
case 'r':
val = std::atoi(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -r\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_node_rank(val);
break;
case 's':
val = std::atoi(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -s\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_node_size(val);
break;
case 'n':
val = std::atoi(optarg);
if (val < 0) {
printf("Negative value, invalid argument for option -n\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
params.set_num_gpu(val);
break;
default:
printf("Invalid or unrecognized option\n");
printf(usage, argv[0]);
exit(EXIT_FAILURE);
}
}
if (!has_option) { printf("No option or argument is passed, use the default param\n"); }
wholegraph::bench::gather_scatter::gather_scatter_benchmark(params);
return 0;
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/bench
|
rapidsai_public_repos/wholegraph/cpp/bench/common/wholegraph_benchmark.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wholegraph_benchmark.hpp"
#include "wholememory/communicator.hpp"
#include <cstdint>
#include <experimental/functional>
#include <experimental/random>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <functional>
#include <string>
#include <vector>
namespace wholegraph::bench {
template <typename IndexT>
void host_get_random_integer_indices(void* indices,
wholememory_array_description_t indice_desc,
int64_t max_indices)
{
IndexT* indices_ptr = static_cast<IndexT*>(indices);
std::experimental::reseed();
for (int64_t i = 0; i < indice_desc.size; i++) {
IndexT random_index = std::experimental::randint<IndexT>(0, max_indices - 1);
indices_ptr[i + indice_desc.storage_offset] = random_index;
}
}
void host_random_init_integer_indices(void* indices,
wholememory_array_description_t indices_desc,
int64_t max_indices)
{
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
host_get_random_integer_indices<int>(indices, indices_desc, max_indices);
} else {
host_get_random_integer_indices<int64_t>(indices, indices_desc, max_indices);
}
}
void MultiProcessMeasurePerformance(std::function<void()> run_fn,
wholememory_comm_t& wm_comm,
const PerformanceMeter& meter,
const std::function<void()>& barrier_fn)
{
barrier_fn();
// warm up
struct timeval tv_warmup_s;
gettimeofday(&tv_warmup_s, nullptr);
int64_t target_warmup_time = 1000LL * 1000LL * meter.warmup_seconds;
while (true) {
struct timeval tv_warmup_c;
gettimeofday(&tv_warmup_c, nullptr);
int64_t time_warmup = TIME_DIFF_US(tv_warmup_s, tv_warmup_c);
if (time_warmup >= target_warmup_time) break;
run_fn();
WHOLEMEMORY_CHECK_NOTHROW(cudaDeviceSynchronize() == cudaSuccess);
}
WHOLEMEMORY_CHECK_NOTHROW(cudaDeviceSynchronize() == cudaSuccess);
barrier_fn();
// run
struct timeval tv_run_s, tv_run_e;
int64_t max_run_us = 1000LL * 1000LL * meter.max_run_seconds;
gettimeofday(&tv_run_s, nullptr);
int real_run_count = 0;
for (int i = 0; i < meter.run_count; i++) {
run_fn();
real_run_count++;
struct timeval tv_run_c;
gettimeofday(&tv_run_c, nullptr);
int64_t time_run_used = TIME_DIFF_US(tv_run_s, tv_run_c);
if (time_run_used >= max_run_us || real_run_count >= meter.run_count) break;
if (meter.sync) { WHOLEMEMORY_CHECK_NOTHROW(cudaDeviceSynchronize() == cudaSuccess); }
}
WHOLEMEMORY_CHECK_NOTHROW(cudaDeviceSynchronize() == cudaSuccess);
gettimeofday(&tv_run_e, nullptr);
int64_t real_time_used_us = TIME_DIFF_US(tv_run_s, tv_run_e);
double single_run_time_us = real_time_used_us;
single_run_time_us /= real_run_count;
barrier_fn();
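  // Convert each metric to a per-second rate (or, when 'invert' is set, scale it by the measured
  // per-run time in seconds), then allgather the per-rank values to report min/max/avg.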
for (size_t i = 0; i < meter.metrics_.size(); i++) {
double metric_value = meter.metrics_[i].value;
if (meter.metrics_[i].invert) {
metric_value *= single_run_time_us;
metric_value /= 1e6;
} else {
metric_value /= single_run_time_us;
metric_value *= 1e6;
}
std::vector<double> recv_vec(wm_comm->world_size);
wm_comm->host_allgather(&metric_value, recv_vec.data(), 1, WHOLEMEMORY_DT_DOUBLE);
double min_metric, max_metric, avg_metric;
min_metric = max_metric = recv_vec[0];
avg_metric = 0.0;
for (int j = 0; j < wm_comm->world_size; j++) {
min_metric = std::min(min_metric, recv_vec[j]);
max_metric = std::max(max_metric, recv_vec[j]);
avg_metric += recv_vec[j];
}
avg_metric /= wm_comm->world_size;
if (wm_comm->world_rank == 0) {
fprintf(stderr,
"== Metric: %20s: min=%.2lf %s,, max=%.2lf %s,, avg=%.2lf %s\n",
meter.metrics_[i].name.c_str(),
min_metric,
meter.metrics_[i].unit.c_str(),
max_metric,
meter.metrics_[i].unit.c_str(),
avg_metric,
meter.metrics_[i].unit.c_str());
}
}
}
} // namespace wholegraph::bench
| 0 |
rapidsai_public_repos/wholegraph/cpp/bench
|
rapidsai_public_repos/wholegraph/cpp/bench/common/wholegraph_benchmark.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <functional>
#include <string>
#include <vector>
#include "error.hpp"
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholegraph::bench {
#define TIME_DIFF_US(TVS, TVE) \
((TVE.tv_sec - TVS.tv_sec) * 1000ULL * 1000ULL + (TVE.tv_usec - TVS.tv_usec))
void host_random_init_integer_indices(void* indices,
wholememory_array_description_t indices_desc,
int64_t max_indices);
struct Metric {
Metric(const std::string& metrics_name,
const std::string& metrics_unit,
const double metrics_value,
bool inv)
{
name = metrics_name;
unit = metrics_unit;
value = metrics_value;
invert = inv;
}
std::string name;
std::string unit;
double value;
bool invert;
};
struct PerformanceMeter {
PerformanceMeter& SetSync()
{
sync = true;
return *this;
}
bool sync = false;
PerformanceMeter& SetWarmupTime(float w)
{
warmup_seconds = w;
return *this;
}
float warmup_seconds = 0.05f;
std::vector<Metric> metrics_;
PerformanceMeter& AddMetrics(const std::string& metrics_name,
const std::string& unit,
double value,
bool inv = false)
{
metrics_.emplace_back(metrics_name, unit, value, inv);
return *this;
}
PerformanceMeter& SetRunCount(int count)
{
run_count = count;
return *this;
}
int run_count = 100;
PerformanceMeter& SetMaxRunSeconds(float sec)
{
max_run_seconds = sec;
return *this;
}
float max_run_seconds = 10;
PerformanceMeter& SetName(const std::string& n)
{
name = n;
return *this;
}
std::string name;
};
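// Illustrative use (see gather_scatter_bench.cu): a meter is configured with chained setters, e.g.
//   PerformanceMeter meter;
//   meter.AddMetrics("Bandwidth", "GB/s", bytes / 1e9).SetMaxRunSeconds(1000).SetRunCount(20);
// and then passed to MultiProcessMeasurePerformance below together with a barrier function.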
void MultiProcessMeasurePerformance(std::function<void()> run_fn,
wholememory_comm_t& wm_comm,
const PerformanceMeter& meter,
const std::function<void()>& barrier_fn);
} // namespace wholegraph::bench
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/cmake/CodeChecker.cmake
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
function(add_code_checks)
set(options "")
set(oneValueArgs CWD CLANG_FORMAT CLANG_TIDY FLAKE8)
set(multiValueArgs "")
cmake_parse_arguments(code_checker
"${options}" "${oneValueArgs}" "${multiValueArgs}"
${ARGN})
# clang format checker
add_custom_target(clang-format
python scripts/run-clang-format.py
-exe ${code_checker_CLANG_FORMAT}
WORKING_DIRECTORY ${code_checker_CWD}
VERBATIM
COMMENT "Checks for code formatting using clang-format")
# clang format inplace fixer
add_custom_target(fix-clang-format
python scripts/run-clang-format.py
-inplace
-exe ${code_checker_CLANG_FORMAT}
WORKING_DIRECTORY ${code_checker_CWD}
VERBATIM
COMMENT "Fixes any code formatting issues using clang-format")
# clang tidy checker
add_custom_target(clang-tidy
python scripts/run-clang-tidy.py
-cdb ${PROJECT_BINARY_DIR}/compile_commands.json
-exe ${code_checker_CLANG_TIDY}
WORKING_DIRECTORY ${code_checker_CWD}
VERBATIM
COMMENT "Checks for coding conventions using clang-tidy")
# flake8
add_custom_target(flake8
${code_checker_FLAKE8} --exclude build*
WORKING_DIRECTORY ${code_checker_CWD}
VERBATIM
COMMENT "Checks for python coding conventions using flake8")
endfunction(add_code_checks)
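# Illustrative invocation from a top-level CMakeLists.txt (tool names/paths are placeholders):
#   add_code_checks(CWD ${PROJECT_SOURCE_DIR}
#                   CLANG_FORMAT clang-format
#                   CLANG_TIDY clang-tidy
#                   FLAKE8 flake8)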
| 0 |
rapidsai_public_repos/wholegraph/cpp/cmake
|
rapidsai_public_repos/wholegraph/cpp/cmake/thirdparty/get_nccl.cmake
|
#=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_nccl)
if(TARGET NCCL::NCCL)
return()
endif()
rapids_find_generate_module(NCCL
HEADER_NAMES nccl.h
LIBRARY_NAMES nccl
)
# Currently NCCL has no CMake build-system so we require
# it built and installed on the machine already
rapids_find_package(NCCL REQUIRED)
endfunction()
find_and_configure_nccl()
| 0 |
rapidsai_public_repos/wholegraph/cpp/cmake
|
rapidsai_public_repos/wholegraph/cpp/cmake/thirdparty/get_raft.cmake
|
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
set(WHOLEGRAPH_MIN_VERSION_raft "${RAPIDS_VERSION}.00")
set(WHOLEGRAPH_BRANCH_VERSION_raft "${RAPIDS_VERSION}")
function(find_and_configure_raft)
set(oneValueArgs VERSION FORK PINNED_TAG CLONE_ON_PIN)
cmake_parse_arguments(PKG "" "${oneValueArgs}" "" ${ARGN} )
if(PKG_CLONE_ON_PIN AND NOT PKG_PINNED_TAG STREQUAL "branch-${WHOLEGRAPH_BRANCH_VERSION_raft}")
message("Pinned tag found: ${PKG_PINNED_TAG}. Cloning raft locally.")
set(CPM_DOWNLOAD_raft ON)
endif()
rapids_cpm_find(raft ${PKG_VERSION}
GLOBAL_TARGETS raft::raft
BUILD_EXPORT_SET wholegraph-exports
INSTALL_EXPORT_SET wholegraph-exports
CPM_ARGS
EXCLUDE_FROM_ALL TRUE
GIT_REPOSITORY https://github.com/${PKG_FORK}/raft.git
GIT_TAG ${PKG_PINNED_TAG}
SOURCE_SUBDIR cpp
OPTIONS
"RAFT_COMPILE_LIBRARIES OFF"
"RAFT_COMPILE_DIST_LIBRARY OFF"
"BUILD_TESTS OFF"
"BUILD_BENCH OFF"
"RAFT_ENABLE_cuco_DEPENDENCY OFF"
)
if(raft_ADDED)
message(VERBOSE "WHOLEGRAPH: Using RAFT located in ${raft_SOURCE_DIR}")
else()
message(VERBOSE "WHOLEGRAPH: Using RAFT located in ${raft_DIR}")
endif()
endfunction()
# Change pinned tag and fork here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_raft_SOURCE=/path/to/local/raft
find_and_configure_raft(VERSION ${WHOLEGRAPH_MIN_VERSION_raft}
FORK rapidsai
PINNED_TAG branch-${WHOLEGRAPH_BRANCH_VERSION_raft}
# When PINNED_TAG above doesn't match wholegraph,
# force local raft clone in build directory
# even if it's already installed.
CLONE_ON_PIN ON
)
| 0 |
rapidsai_public_repos/wholegraph/cpp/cmake
|
rapidsai_public_repos/wholegraph/cpp/cmake/thirdparty/get_gtest.cmake
|
#=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_gtest)
include(${rapids-cmake-dir}/cpm/gtest.cmake)
rapids_cpm_gtest()
endfunction()
find_and_configure_gtest()
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/docs/DEVELOPER_GUIDE.md
|
# wholegraph C++ Developer Guide
This document serves as a guide for contributors to wholegraph C++ code. Developers should also refer
to these additional files for further documentation of wholegraph best practices.
* [Documentation Guide](TODO) for guidelines on documenting wholegraph code.
* [Testing Guide](TODO) for guidelines on writing unit tests.
* [Benchmarking Guide](TODO) for guidelines on writing unit benchmarks.
# Overview
wholegraph includes a C++ library that provides GPU-accelerated graph algorithms for processing
sparse graphs.
## Lexicon
This section defines terminology used within wholegraph
### COO
COOrdinate format is one of the standard formats for representing graph data. In COO format the
graph is represented as an array of source vertex ids, an array of destination vertex ids, and an
optional array of edge weights. Edge i is identified by source_vertex_id[i], destination_vertex_id[i]
and weight[i].
### MORE
# Directory Structure and File Naming
External/public wholegraph APIs are grouped based on functionality into an appropriately titled
header file in `wholegraph/cpp/include/`. For example, `wholegraph/cpp/include/graph.hpp`
contains the definition of the (legacy) graph objects. Note the `.hpp`
file extension used to indicate a C++ header file.
Header files should use the `#pragma once` include guard.
## File extensions
- `.hpp` : C++ header files
- `.cpp` : C++ source files
- `.cu` : CUDA C++ source files
- `.cuh` : Headers containing CUDA device code
Header files and source files should use `.hpp` and `.cpp` extensions unless they must
be compiled by nvcc. `.cu` and `.cuh` files are more expensive to compile, so we want
to minimize the use of these files to only when necessary. A good indicator of the need
to use a `.cu` or `.cuh` file is the inclusion of `__device__` and other
symbols that are only recognized by `nvcc`. Another indicator is Thrust
algorithm APIs with a device execution policy (always `rmm::exec_policy` in wholegraph).
## Code and Documentation Style and Formatting
wholegraph code uses [snake_case](https://en.wikipedia.org/wiki/Snake_case) for all names except in a
few cases: unit tests and test case names may use Pascal case, aka
[UpperCamelCase](https://en.wikipedia.org/wiki/Camel_case). We do not use
[Hungarian notation](https://en.wikipedia.org/wiki/Hungarian_notation), except for the following examples:
* device data variables should be prefaced by d_ if it makes the intent clearer
* host data variables should be prefaced by h_ if it makes the intent clearer
* template parameters defining a type should be suffixed with _t
* private member variables are typically suffixed with an underscore
```c++
template <typename graph_t>
void algorithm_function(graph_t const &g)
{
...
}
template <typename vertex_t>
class utility_class
{
...
private:
vertex_t num_vertices_{};
};
```
C++ formatting is enforced using `clang-format`. You should configure `clang-format` on your
machine to use the `wholegraph/cpp/.clang-format` configuration file, and run `clang-format` on all
changed code before committing it. The easiest way to do this is to configure your editor to
"format on save".
Aspects of code style not discussed in this document and not automatically enforceable are typically
caught during code review, or not enforced.
### C++ Guidelines
In general, we recommend following
[C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). We also
recommend watching Sean Parent's [C++ Seasoning talk](https://www.youtube.com/watch?v=W2tWOdzgXHA),
and we try to follow his rules: "No raw loops. No raw pointers. No raw synchronization primitives."
* Prefer algorithms from STL and Thrust to raw loops.
* Prefer wholegraph and RMM to raw pointers and raw memory allocation.
Documentation is discussed in the [Documentation Guide](TODO).
### Includes
The following guidelines apply to organizing `#include` lines.
* Group includes by library (e.g. wholegraph, RMM, Thrust, STL). `clang-format` will respect the
groupings and sort the individual includes within a group lexicographically.
* Separate groups by a blank line.
* Order the groups from "nearest" to "farthest". In other words, local includes, then includes
from other RAPIDS libraries, then includes from related libraries, like `<thrust/...>`, then
includes from dependencies installed with wholegraph, and then standard headers (for example `<string>`,
`<iostream>`).
* Use <> instead of "" unless the header is in the same directory as the source file.
* Tools like `clangd` often auto-insert includes when they can, but they usually get the grouping
and brackets wrong.
* Always check that includes are only necessary for the file in which they are included.
Try to avoid excessive including especially in header files. Double check this when you remove
code.
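For example, the includes of a hypothetical source file under `cpp/src/` might be grouped as follows (the specific headers are only illustrative):
```c++
// local includes
#include "cuda_macros.hpp"
#include "logger.hpp"

// other RAPIDS libraries
#include <rmm/device_buffer.hpp>

// related libraries
#include <thrust/sort.h>

// dependencies installed with wholegraph
#include <cuda_runtime_api.h>

// standard headers
#include <string>
#include <vector>
```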
# wholegraph Data Structures
Application data in wholegraph is contained in graph objects, but there are a variety of other
data structures you will use when developing wholegraph code.
## Views and Ownership
Resource ownership is an essential concept in wholegraph. In short, an "owning" object owns a
resource (such as device memory). It acquires that resource during construction and releases the
resource in destruction ([RAII](https://en.cppreference.com/w/cpp/language/raii)). A "non-owning"
object does not own resources. Any class in wholegraph with the `*_view` suffix is non-owning.
## `rmm::device_memory_resource`<a name="memory_resource"></a>
wholegraph allocates all device memory via RMM memory resources (MR). See the
[RMM documentation](https://github.com/rapidsai/rmm/blob/main/README.md) for details.
## Streams
CUDA streams are not yet exposed in external wholegraph APIs.
We are currently investigating the best technique for exposing this.
### Memory Management
wholegraph code generally eschews raw pointers and direct memory allocation. Use RMM classes built to
use `device_memory_resource`(*)s for device memory allocation with automated lifetime management.
#### `rmm::device_buffer`
Allocates a specified number of bytes of untyped, uninitialized device memory using a
`device_memory_resource`. If no resource is explicitly provided, uses
`rmm::mr::get_current_device_resource()`.
`rmm::device_buffer` is movable and copyable on a stream. A copy performs a deep copy of the
`device_buffer`'s device memory on the specified stream, whereas a move moves ownership of the
device memory from one `device_buffer` to another.
```c++
// Allocates at least 100 bytes of uninitialized device memory
// using the specified resource and stream
rmm::device_buffer buff(100, stream, mr);
void * raw_data = buff.data(); // Raw pointer to underlying device memory
// Deep copies `buff` into `copy` on `stream`
rmm::device_buffer copy(buff, stream);
// Moves contents of `buff` into `moved_to`
rmm::device_buffer moved_to(std::move(buff));
custom_memory_resource *mr...;
// Allocates 100 bytes from the custom_memory_resource
rmm::device_buffer custom_buff(100, stream, mr);
```
#### `rmm::device_uvector<T>`
Similar to a `rmm::device_vector`, allocates a contiguous set of elements in device memory but with
key differences:
- As an optimization, elements are uninitialized and no synchronization occurs at construction.
This limits the types `T` to trivially copyable types.
- All operations are stream ordered (i.e., they accept a `cuda_stream_view` specifying the stream
on which the operation is performed).
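A minimal usage sketch, assuming `stream` is a valid `rmm::cuda_stream_view`:
```c++
// 100 uninitialized ints; construction does not synchronize.
rmm::device_uvector<int32_t> vec(100, stream);
int32_t* raw = vec.data();  // raw device pointer
vec.resize(200, stream);    // stream-ordered reallocation
```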
## Namespaces
### External
All public wholegraph APIs should be placed in the `wholegraph` namespace. Example:
```c++
namespace wholegraph{
void public_function(...);
} // namespace wholegraph
```
### Internal
Many functions are not meant for public use, so place them in either the `detail` or an *anonymous*
namespace, depending on the situation.
#### `detail` namespace
Functions or objects that will be used across *multiple* translation units (i.e., source files),
should be exposed in an internal header file and placed in the `detail` namespace. Example:
```c++
// some_utilities.hpp
namespace wholegraph{
namespace detail{
void reusable_helper_function(...);
} // namespace detail
} // namespace wholegraph
```
#### Anonymous namespace
Functions or objects that will only be used in a *single* translation unit should be defined in an
*anonymous* namespace in the source file where it is used. Example:
```c++
// some_file.cpp
namespace{
void isolated_helper_function(...);
} // anonymous namespace
```
[**Anonymous namespaces should *never* be used in a header file.**](https://wiki.sei.cmu.edu/confluence/display/cplusplus/DCL59-CPP.+Do+not+define+an+unnamed+namespace+in+a+header+file)
# Error Handling
wholegraph follows conventions (and provides utilities) enforcing compile-time and run-time
conditions and detecting and handling CUDA errors. Communication of errors is always via C++
exceptions.
## Runtime Conditions
Use the `wholegraph_EXPECTS` macro to enforce runtime conditions necessary for correct execution.
Example usage:
```c++
wholegraph_EXPECTS(lhs.type() == rhs.type(), "Column type mismatch");
```
The first argument is the conditional expression expected to resolve to `true` under normal
conditions. If the conditional evaluates to `false`, then an error has occurred and an instance of `wholegraph::logic_error` is thrown. The second argument to `wholegraph_EXPECTS` is a short description of the
error that has occurred and is used for the exception's `what()` message.
There are times where a particular code path, if reached, should indicate an error no matter what.
For example, often the `default` case of a `switch` statement represents an invalid alternative.
Use the `wholegraph_FAIL` macro for such errors. This is effectively the same as calling
`wholegraph_EXPECTS(false, reason)`.
Example:
```c++
wholegraph_FAIL("This code path should not be reached.");
```
### CUDA Error Checking
Use the `CUDA_TRY` macro to check for the successful completion of CUDA runtime API functions. This
macro throws a `wholegraph::cuda_error` exception if the CUDA API return value is not `cudaSuccess`. The
thrown exception includes a description of the CUDA error code in its `what()` message.
Example:
```c++
CUDA_TRY( cudaMemcpy(&dst, &src, num_bytes) );
```
## Compile-Time Conditions
Use `static_assert` to enforce compile-time conditions. For example,
```c++
template <typename T>
void trivial_types_only(T t){
static_assert(std::is_trivial<T>::value, "This function requires a trivial type.");
...
}
```
# Data Types
TBD
# Type Dispatcher
TBD
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/net_utils.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "net_utils.h"
#include <arpa/inet.h>
#include <fcntl.h>
#include <netdb.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <string>
#include "cuda_macros.hpp"
static void ResolveHostName(sockaddr_in* saddr, const std::string& host_name, int port)
{
addrinfo hints = {0, AF_INET, SOCK_STREAM, IPPROTO_TCP, 0, nullptr, nullptr, nullptr};
addrinfo* res;
char port_buf[16];
snprintf(port_buf, 16, "%d", port);
int ret = getaddrinfo(host_name.c_str(), port_buf, &hints, &res);
if (ret != 0) {
printf("Resolve IP for host %s failed.\n", host_name.c_str());
abort();
}
  *saddr = *(sockaddr_in*)(res->ai_addr);
  freeaddrinfo(res);
}
int CreateServerListenFd(int port)
{
int server_sock = socket(AF_INET, SOCK_STREAM, 0);
WHOLEMEMORY_CHECK_NOTHROW(server_sock >= 0);
int enable = 1;
WHOLEMEMORY_CHECK_NOTHROW(
setsockopt(server_sock, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) == 0);
// Binding
sockaddr_in server_addr;
memset(&server_addr, 0, sizeof(sockaddr_in));
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(port);
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
WHOLEMEMORY_CHECK_NOTHROW(bind(server_sock, (sockaddr*)&server_addr, sizeof(server_addr)) == 0);
return server_sock;
}
void ServerListen(int listen_fd, int backlog)
{
WHOLEMEMORY_CHECK_NOTHROW(listen(listen_fd, backlog) == 0);
}
int ServerAccept(int listen_fd, sockaddr_in* client_addr, socklen_t* client_addr_len)
{
int client_sock = accept(listen_fd, (sockaddr*)client_addr, client_addr_len);
return client_sock;
}
int CreateClientFd(const std::string& server_name, int server_port)
{
int client_sock = socket(AF_INET, SOCK_STREAM, 0);
WHOLEMEMORY_CHECK_NOTHROW(client_sock >= 0);
sockaddr_in server_addr;
ResolveHostName(&server_addr, server_name, server_port);
WHOLEMEMORY_CHECK_NOTHROW(server_addr.sin_family == AF_INET);
WHOLEMEMORY_CHECK_NOTHROW(server_addr.sin_port == htons(server_port));
#if 0
inet_pton(AF_INET, server_name.c_str(), &server_addr.sin_addr);
#endif
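  // Retry until the server starts listening; ECONNREFUSED is expected while waiting.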
while (connect(client_sock, (sockaddr*)&server_addr, sizeof(server_addr)) < 0) {
switch (errno) {
case ECONNREFUSED:
// std::cerr << "Server may not running, waiting..." << std::endl;
break;
      case ETIMEDOUT: printf("Connection timed out, retrying...\n"); break;
      case ENETUNREACH: printf("Network unreachable, retrying...\n"); break;
      default: printf("unknown error %d, retrying...\n", errno); break;
}
usleep(500 * 1000);
}
return client_sock;
}
void SingleSend(int sock_fd, const void* send_data, size_t send_size)
{
ssize_t bytes_send = send(sock_fd, send_data, send_size, 0);
if (bytes_send < 0) {
printf("recv returned %ld, errno=%d %s\n", bytes_send, errno, strerror(errno));
}
WHOLEMEMORY_CHECK_NOTHROW(bytes_send == send_size);
}
void SingleRecv(int sock_fd, void* recv_data, size_t recv_size)
{
ssize_t bytes_received = recv(sock_fd, recv_data, recv_size, 0);
if (bytes_received < 0) {
printf("recv returned %ld, errno=%d %s\n", bytes_received, errno, strerror(errno));
}
WHOLEMEMORY_CHECK_NOTHROW(bytes_received == recv_size);
}
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/parallel_utils.cpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parallel_utils.hpp"
#include <cuda_runtime_api.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <sys/wait.h>
#include <atomic>
#include <iostream>
#include <memory>
#include <set>
#include <thread>
#include <vector>
#include "cuda_macros.hpp"
#include "net_utils.h"
void MultiThreadRun(int size, std::function<void(int, int)> f)
{
std::vector<std::unique_ptr<std::thread>> threads(size);
for (int i = 0; i < size; i++) {
threads[i] = std::make_unique<std::thread>([f, i, size] { return f(i, size); });
}
for (int i = 0; i < size; i++) {
threads[i]->join();
}
}
int GetProcessorCount() { return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN)); }
void MultiProcessRun(int world_size, std::function<void(int, int)> f, bool inline_single_process)
{
if (world_size == 1 && inline_single_process) {
f(0, 1);
return;
}
// This variable is added to prevent from calling MultiProcessRun recursively by mistake,
// which may fork too many process and lead to system crash.
static std::atomic<int64_t> running_count(0);
std::vector<pid_t> pids(world_size);
static bool is_child = false;
int child_idx = 0;
int current_running_count = running_count.fetch_add(1);
if (current_running_count > 0 || is_child) {
if (!is_child) running_count.fetch_sub(1);
WHOLEMEMORY_FATAL("Already have MultiProcessRun, running_count=%d, %s child process",
current_running_count,
is_child ? "is" : "not");
}
for (; child_idx < world_size; child_idx++) {
pids[child_idx] = fork();
if (pids[child_idx] == -1) {
WHOLEMEMORY_ERROR("fork failed.");
break;
}
if (pids[child_idx] == 0) {
is_child = true;
f(child_idx, world_size);
exit(0);
}
}
if (child_idx != world_size) {
for (int i = 0; i < child_idx; i++) {
// kill all launched child process in case they may wait for each other.
kill(pids[i], SIGKILL);
int wstatus;
pid_t pid_ret = waitpid(pids[i], &wstatus, 0);
}
WHOLEMEMORY_FATAL("MultiProcessRun failed.");
}
for (int i = 0; i < world_size; i++) {
int wstatus;
pid_t pid_ret = waitpid(pids[i], &wstatus, 0);
if (pid_ret != pids[i]) {
running_count.fetch_sub(1);
WHOLEMEMORY_FATAL(
"Rank %d returned pid %d not equal to pid %d", i, (int)pid_ret, (int)pids[i]);
}
if ((!WIFEXITED(wstatus)) || (WEXITSTATUS(wstatus) != 0)) {
running_count.fetch_sub(1);
WHOLEMEMORY_FATAL("Rank %d exit with error", i);
}
}
running_count.fetch_sub(1);
}
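// SideBandCommunicator implements simple TCP-based collectives (all-to-all, all-gather,
// broadcast, barrier) for multi-process tests. During Start(), rank 0 runs a server thread
// that accepts one connection per rank; the collectives are then relayed through rank 0.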
class SideBandCommunicator {
public:
SideBandCommunicator(int world_rank, int world_size, const char* server_addr, int port);
~SideBandCommunicator();
void Start();
void Stop();
void GroupAllToAll(const void* input, void* output, size_t element_size, int group_count = 1);
void GroupAllGather(const void* input, void* output, size_t element_size, int group_count = 1);
void GroupBroadcast(void* data, size_t element_size, int root_group_rank, int group_count = 1);
void Barrier();
private:
static constexpr int kSideBandMagic = 0x51debacd;
void ServerAcceptFunc();
int world_rank_ = -1;
int world_size_ = 0;
std::string server_address_;
int server_port_ = -1;
int client_fd_ = -1;
std::vector<int> server_fds_;
std::thread server_thread_;
};
SideBandCommunicator::SideBandCommunicator(int world_rank,
int world_size,
const char* server_addr,
int port)
: server_address_(server_addr),
server_port_(port),
world_rank_(world_rank),
world_size_(world_size)
{
}
SideBandCommunicator::~SideBandCommunicator() {}
void SideBandCommunicator::Start()
{
server_fds_.resize(world_size_, -1);
std::thread server_accept_thread;
if (world_rank_ == 0) {
server_accept_thread = std::thread([this]() { this->ServerAcceptFunc(); });
}
client_fd_ = CreateClientFd(server_address_, server_port_);
int send_data[2];
send_data[0] = kSideBandMagic;
send_data[1] = world_rank_;
SingleSend(client_fd_, &send_data[0], sizeof(int) * 2);
int magic_number = 0;
SingleRecv(client_fd_, &magic_number, sizeof(int));
WHOLEMEMORY_CHECK_NOTHROW(magic_number == kSideBandMagic);
if (world_rank_ == 0) { server_accept_thread.join(); }
WHOLEMEMORY_INFO("[Client] Rank=%d connected to server.", world_rank_);
}
void SideBandCommunicator::Stop()
{
WHOLEMEMORY_CHECK_NOTHROW(close(client_fd_) == 0);
client_fd_ = -1;
if (world_rank_ == 0) {
for (int i = 0; i < world_size_; i++) {
WHOLEMEMORY_CHECK_NOTHROW(close(server_fds_[i]) == 0);
server_fds_[i] = -1;
}
server_fds_.clear();
}
}
void SideBandCommunicator::ServerAcceptFunc()
{
int server_listen_fd = CreateServerListenFd(server_port_);
// Listening
ServerListen(server_listen_fd, world_size_);
std::set<int> unconnected_rank_set;
for (int i = 0; i < world_size_; i++) {
unconnected_rank_set.insert(i);
}
while (!unconnected_rank_set.empty()) {
sockaddr_in client_addr;
socklen_t client_addr_len = sizeof(client_addr);
int client_sock = accept(server_listen_fd, (sockaddr*)&client_addr, &client_addr_len);
if (client_sock >= 0) {
int recv_data[2];
SingleRecv(client_sock, &recv_data[0], sizeof(int) * 2);
WHOLEMEMORY_CHECK_NOTHROW(recv_data[0] == kSideBandMagic);
int rank_id = recv_data[1];
WHOLEMEMORY_CHECK_NOTHROW(rank_id >= 0 && rank_id < world_size_);
WHOLEMEMORY_CHECK_NOTHROW(unconnected_rank_set.count(rank_id) > 0);
server_fds_[rank_id] = client_sock;
unconnected_rank_set.erase(rank_id);
WHOLEMEMORY_INFO("[Server] Rank %d connected to SideBandCommunicator", rank_id);
}
}
WHOLEMEMORY_CHECK_NOTHROW(close(server_listen_fd) == 0);
WHOLEMEMORY_INFO("[Server] All ranks connected to SideBandCommunicator");
for (int i = 0; i < world_size_; i++) {
int send_data[2];
send_data[0] = kSideBandMagic;
send_data[1] = i;
SingleSend(server_fds_[i], &send_data[0], sizeof(int));
}
}
void SideBandCommunicator::GroupAllToAll(const void* input,
void* output,
size_t element_size,
int group_count)
{
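  // All-to-all via a rank-0 relay: every rank sends group_size elements (one destined for
  // each peer in its group) and receives group_size elements (one from each peer) back.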
WHOLEMEMORY_CHECK_NOTHROW(world_size_ % group_count == 0);
int group_size = world_size_ / group_count;
SingleSend(client_fd_, input, element_size * group_size);
if (world_rank_ == 0) {
std::vector<char> recv_buffer(element_size * group_size);
std::vector<std::vector<char>> send_buffer(group_size);
for (int i = 0; i < group_size; i++) {
send_buffer[i].resize(element_size * group_size);
}
    for (int group_id = 0; group_id < group_count; group_id++) {
      for (int src_rank = group_id * group_size; src_rank < (group_id + 1) * group_size;
           src_rank++) {
        SingleRecv(server_fds_[src_rank], recv_buffer.data(), recv_buffer.size());
        int src_gr = src_rank - group_id * group_size;
        for (int gr = 0; gr < group_size; gr++) {
          memcpy(send_buffer[gr].data() + src_gr * element_size,
                 recv_buffer.data() + gr * element_size,
                 element_size);
        }
      }
      for (int dst_gr = 0; dst_gr < group_size; dst_gr++) {
        int r = dst_gr + group_id * group_size;
        SingleSend(server_fds_[r], send_buffer[dst_gr].data(), send_buffer[dst_gr].size());
      }
    }
  }
SingleRecv(client_fd_, output, element_size * group_size);
}
void SideBandCommunicator::GroupAllGather(const void* input,
void* output,
size_t element_size,
int group_count)
{
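  // All-gather via a rank-0 relay: every rank contributes one element and receives the
  // group_size elements of its group, ordered by in-group rank.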
WHOLEMEMORY_CHECK_NOTHROW(world_size_ % group_count == 0);
int group_size = world_size_ / group_count;
SingleSend(client_fd_, input, element_size);
if (world_rank_ == 0) {
std::vector<char> recv_buffer(element_size);
std::vector<std::vector<char>> send_buffer(group_size);
for (int i = 0; i < group_size; i++) {
send_buffer[i].resize(element_size * group_size);
}
    for (int group_id = 0; group_id < group_count; group_id++) {
      for (int src_rank = group_id * group_size; src_rank < (group_id + 1) * group_size;
           src_rank++) {
        SingleRecv(server_fds_[src_rank], recv_buffer.data(), recv_buffer.size());
        int src_gr = src_rank - group_id * group_size;
        for (int gr = 0; gr < group_size; gr++) {
          memcpy(send_buffer[gr].data() + src_gr * element_size, recv_buffer.data(), element_size);
        }
      }
      for (int dst_gr = 0; dst_gr < group_size; dst_gr++) {
        int r = dst_gr + group_id * group_size;
        SingleSend(server_fds_[r], send_buffer[dst_gr].data(), send_buffer[dst_gr].size());
      }
    }
  }
SingleRecv(client_fd_, output, element_size * group_size);
}
void SideBandCommunicator::GroupBroadcast(void* data,
size_t element_size,
int root_group_rank,
int group_count)
{
WHOLEMEMORY_CHECK_NOTHROW(world_size_ % group_count == 0);
int group_size = world_size_ / group_count;
int group_rank = world_rank_ % group_size;
if (group_rank == root_group_rank) { SingleSend(client_fd_, data, element_size); }
if (world_rank_ == 0) {
std::vector<char> recv_buffer(element_size);
for (int group_id = 0; group_id < group_count; group_id++) {
int src_rank = group_id * group_size + root_group_rank;
SingleRecv(server_fds_[src_rank], recv_buffer.data(), recv_buffer.size());
for (int r = group_id * group_size; r < (group_id + 1) * group_size; r++) {
SingleSend(server_fds_[r], recv_buffer.data(), recv_buffer.size());
}
}
}
SingleRecv(client_fd_, data, element_size);
}
void SideBandCommunicator::Barrier()
{
int data = 0;
std::vector<int> recv_data(world_size_);
GroupAllGather(&data, recv_data.data(), sizeof(int), 1);
}
SideBandCommunicator* StartSidebandCommunicator(int world_rank,
int world_size,
const char* server_addr,
int port)
{
auto* side_band_communicator =
new SideBandCommunicator(world_rank, world_size, server_addr, port);
side_band_communicator->Start();
return side_band_communicator;
}
void SideBandAllToAll(SideBandCommunicator* side_band_communicator,
const void* input,
void* output,
size_t element_size)
{
side_band_communicator->GroupAllToAll(input, output, element_size);
}
void SideBandAllGather(SideBandCommunicator* side_band_communicator,
const void* input,
void* output,
size_t element_size)
{
side_band_communicator->GroupAllGather(input, output, element_size);
}
void SideBandBroadcast(SideBandCommunicator* side_band_communicator,
void* data,
size_t element_size,
int root_rank)
{
side_band_communicator->GroupBroadcast(data, element_size, root_rank);
}
void ShutDownSidebandCommunicator(SideBandCommunicator* side_band_communicator)
{
side_band_communicator->Stop();
delete side_band_communicator;
}
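// Queries the CUDA device count from a short-lived child process so the calling process
// never initializes a CUDA context itself; CUDA contexts do not survive fork(), which
// matters for the fork()-based MultiProcessRun helper above. The result is cached.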
int ForkGetDeviceCount()
{
static int s_device_count = -1;
if (s_device_count >= 0) { return s_device_count; }
int pipes[2];
if (pipe(pipes) == -1) {
WHOLEMEMORY_ERROR("Create pipe failed.");
return -1;
}
pid_t pid = fork();
if (pid == -1) {
WHOLEMEMORY_ERROR("fork failed.");
return -1;
}
if (pid == 0) {
int dev_count = -1;
WM_CUDA_CHECK(cudaGetDeviceCount(&dev_count));
WHOLEMEMORY_CHECK(close(pipes[0]) == 0);
auto wret = write(pipes[1], &dev_count, sizeof(int));
if (wret != sizeof(int)) { WHOLEMEMORY_FATAL("write dev_count to pipe failed."); }
WHOLEMEMORY_CHECK(close(pipes[1]) == 0);
exit(0);
} else {
int dev_count = -1;
WHOLEMEMORY_CHECK(close(pipes[1]) == 0);
auto rret = read(pipes[0], &dev_count, sizeof(int));
if (rret != sizeof(int)) { WHOLEMEMORY_FATAL("read dev_count from pipe failed."); }
WHOLEMEMORY_CHECK(close(pipes[0]) == 0);
int wstatus;
pid_t pid_ret = waitpid(pid, &wstatus, 0);
if (pid_ret != pid) { WHOLEMEMORY_FATAL("wait dev_count process failed."); }
s_device_count = dev_count;
return dev_count;
}
}
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/cuda_macros.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <execinfo.h>
#include <iomanip>
#include <iostream>
#include <memory>
#include <mutex>
#include <raft/core/error.hpp>
#include "error.hpp"
namespace wholememory {
/**
* @brief Exception thrown when a CUDA error is encountered.
*/
struct cuda_error : public raft::exception {
explicit cuda_error(char const* const message) : raft::exception(message) {}
explicit cuda_error(std::string const& message) : raft::exception(message) {}
};
/**
* @brief Exception thrown when a CUDA driver error is encountered.
*/
struct cu_error : public raft::exception {
explicit cu_error(char const* const message) : raft::exception(message) {}
explicit cu_error(std::string const& message) : raft::exception(message) {}
};
} // namespace wholememory
/**
* @brief Error checking macro for CUDA runtime API functions.
*
* Invokes a CUDA runtime API function call, if the call does not return
* cudaSuccess, invokes cudaGetLastError() to clear the error and throws an
* exception detailing the CUDA error that occurred
*
*/
#define WM_CUDA_TRY(call) \
do { \
cudaError_t const status = call; \
if (status != cudaSuccess) { \
cudaGetLastError(); \
std::string msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(msg, \
"CUDA error encountered at: ", \
"call='%s', Reason=%s:%s", \
#call, \
cudaGetErrorName(status), \
cudaGetErrorString(status)); \
throw wholememory::cuda_error(msg); \
} \
} while (0)
#ifndef WM_CUDA_CHECK
#define WM_CUDA_CHECK(call) WM_CUDA_TRY(call)
#endif
// /**
// * @brief check for cuda runtime API errors but log error instead of raising
// * exception.
// */
#define WM_CUDA_TRY_NO_THROW(call) \
do { \
cudaError_t const status = call; \
if (cudaSuccess != status) { \
printf("CUDA call='%s' at file=%s line=%d failed with %s\n", \
#call, \
__FILE__, \
__LINE__, \
cudaGetErrorString(status)); \
abort(); \
} \
} while (0)
#ifndef WM_CUDA_CHECK_NO_THROW
#define WM_CUDA_CHECK_NO_THROW(call) WM_CUDA_TRY_NO_THROW(call)
#endif
/**
* @brief Error checking macro for CUDA driver API functions.
*
* Invokes a CUDA driver API function call, if the call does not return
* CUDA_SUCCESS, invokes cuGetErrorString() to clear the error and throws an
* exception detailing the CU error that occurred
*
*/
#define WM_CU_TRY(call) \
do { \
CUresult const status = call; \
if (status != CUDA_SUCCESS) { \
const char* p_err_name = nullptr; \
cuGetErrorName(status, &p_err_name); \
const char* p_err_str = nullptr; \
if (cuGetErrorString(status, &p_err_str) == CUDA_ERROR_INVALID_VALUE) { \
p_err_str = "Unrecoginzed CU error num"; \
} \
std::string msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(msg, \
"CU error encountered at: ", \
"call='%s', Reason=%s:%s", \
#call, \
p_err_name, \
p_err_str); \
throw wholememory::cu_error(msg); \
} \
} while (0)
#ifndef WM_CU_CHECK
#define WM_CU_CHECK(call) WM_CU_TRY(call)
#endif
// /**
// * @brief check for cuda driver API errors but log error instead of raising
// * exception.
// */
#define WM_CU_TRY_NO_THROW(call) \
do { \
CUresult const status = call; \
if (status != CUDA_SUCCESS) { \
const char* p_err_str = nullptr; \
if (cuGetErrorString(status, &p_err_str) == CUDA_ERROR_INVALID_VALUE) { \
p_err_str = "Unrecoginzed CU error num"; \
} \
printf( \
"CU call='%s' at file=%s line=%d failed with %s\n", #call, __FILE__, __LINE__, p_err_str); \
} \
} while (0)
#ifndef WM_CU_CHECK_NO_THROW
#define WM_CU_CHECK_NO_THROW(call) WM_CU_TRY_NO_THROW(call)
#endif
namespace wholememory {
void set_debug_sync_mode(bool debug_sync_mode);
void debug_synchronize(const char* filename, int line, cudaStream_t stream);
}  // namespace wholememory
#define WM_CUDA_DEBUG_SYNC_STREAM(S) wholememory::debug_synchronize(__FILE__, __LINE__, (S))
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/logger.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "logger.hpp"
#include <cstdio>
#include <memory>
#include <string>
namespace wholememory {
int& get_log_level()
{
static int log_level = LEVEL_INFO;
return log_level;
}
void set_log_level(int lev) { get_log_level() = lev; }
bool will_log_for(int lev) { return lev <= get_log_level(); }
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/logger.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdarg>
#include <iostream>
#include <string>
#include <cassert>
#include <vector>
#include <raft/core/error.hpp>
#include "error.hpp"
namespace wholememory {
static constexpr int LEVEL_FATAL = 0;
static constexpr int LEVEL_ERROR = 10;
static constexpr int LEVEL_WARN = 100;
static constexpr int LEVEL_INFO = 1000;
static constexpr int LEVEL_DEBUG = 10000;
static constexpr int LEVEL_TRACE = 100000;
int& get_log_level();
void set_log_level(int lev);
bool will_log_for(int lev);
/**
* @defgroup CStringFormat Expand a C-style format string
*
* @brief Expands C-style formatted string into std::string
*
* @param[in] fmt format string
* @param[in] vl respective values for each of format modifiers in the string
*
* @return the expanded `std::string`
*
* @{
*/
inline std::string format(const char* fmt, va_list& vl)
{
va_list vl_copy;
va_copy(vl_copy, vl);
int length = std::vsnprintf(nullptr, 0, fmt, vl_copy);
assert(length >= 0);
std::vector<char> buf(length + 1);
(void)std::vsnprintf(buf.data(), length + 1, fmt, vl);
return std::string(buf.data());
}
inline std::string format(const char* fmt, ...)
{
va_list vl;
va_start(vl, fmt);
std::string str = wholememory::format(fmt, vl);
va_end(vl);
return str;
}
/** @} */
#define WHOLEMEMORY_LOG(lev, fmt, ...) \
do { \
if (wholememory::will_log_for(lev)) \
std::cout << wholememory::format(fmt, ##__VA_ARGS__) << std::endl << std::flush; \
} while (0)
#define WHOLEMEMORY_FATAL(fmt, ...) \
do { \
std::string fatal_msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(fatal_msg, "WholeMemory FATAL at ", fmt, ##__VA_ARGS__); \
throw wholememory::logic_error(fatal_msg); \
} while (0)
#define WHOLEMEMORY_ERROR(fmt, ...) WHOLEMEMORY_LOG(wholememory::LEVEL_ERROR, fmt, ##__VA_ARGS__)
#define WHOLEMEMORY_WARN(fmt, ...) WHOLEMEMORY_LOG(wholememory::LEVEL_WARN, fmt, ##__VA_ARGS__)
#define WHOLEMEMORY_INFO(fmt, ...) WHOLEMEMORY_LOG(wholememory::LEVEL_INFO, fmt, ##__VA_ARGS__)
#define WHOLEMEMORY_DEBUG(fmt, ...) WHOLEMEMORY_LOG(wholememory::LEVEL_DEBUG, fmt, ##__VA_ARGS__)
#define WHOLEMEMORY_TRACE(fmt, ...) WHOLEMEMORY_LOG(wholememory::LEVEL_TRACE, fmt, ##__VA_ARGS__)
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/net_utils.h
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <netinet/in.h>
#include <sys/socket.h>
#include <string>
int CreateServerListenFd(int port);
void ServerListen(int listen_fd, int backlog = 10);
int ServerAccept(int listen_fd, sockaddr_in* client_addr, socklen_t* client_addr_len);
int CreateClientFd(const std::string& server_name, int server_port);
void SingleSend(int sock_fd, const void* send_data, size_t send_size);
void SingleRecv(int sock_fd, void* recv_data, size_t recv_size);
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/parallel_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <unistd.h>
#include <functional>
#include <vector>
#include "logger.hpp"
/**
* Run f with size threads
* @param size : thread count
* @param f : thread function
*/
void MultiThreadRun(int size, std::function<void(int, int)> f);
/**
* Get processor count of the machine.
* @return : processor count
*/
int GetProcessorCount();
/**
* Run f with size processes
* @note when using gtest with MultiProcessRun, testing::Test::HasFailure()
* need to be called before f return and modify exit code according to if has
* gtest failures. See parallel_utils_tests.cpp for reference.
* @param size : process count
* @param f : process function
* @param inline_single_process : use current process to run f if size==1
*/
void MultiProcessRun(int size, std::function<void(int, int)> f, bool inline_single_process = false);
inline int CreatePipes(std::vector<std::array<int, 2>>* pipes, int nproc)
{
pipes->resize(nproc);
for (int i = 0; i < nproc; i++) {
if (pipe((*pipes)[i].data()) == -1) {
WHOLEMEMORY_ERROR("Create pipe failed.");
return -1;
}
}
return 0;
}
inline void ClosePipes(std::vector<std::array<int, 2>>* pipes)
{
for (size_t i = 0; i < pipes->size(); i++) {
WHOLEMEMORY_CHECK(close(pipes->at(i)[0]) == 0);
WHOLEMEMORY_CHECK(close(pipes->at(i)[1]) == 0);
}
pipes->clear();
}
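/**
 * Broadcast a trivially-copyable value from root to all ranks through per-rank pipes.
 * Root writes one copy of *data into every rank's pipe; each rank (including root) then
 * reads its own copy from pipes[rank].
 */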
template <typename T>
inline void PipeBroadcast(
int rank, int world_size, int root, const std::vector<std::array<int, 2>>& pipes, T* data)
{
if (rank == root) {
for (int i = 0; i < world_size; i++) {
auto wret = write(pipes[i][1], data, sizeof(T));
if (wret != sizeof(T)) { WHOLEMEMORY_FATAL("write to pipe failed."); }
}
}
auto rret = read(pipes[rank][0], data, sizeof(T));
if (rret != sizeof(T)) { WHOLEMEMORY_FATAL("read to pipe failed."); }
}
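/**
 * Broadcast a trivially-copyable value within each group of group_size consecutive ranks,
 * from the rank whose in-group index equals group_root. world_size must be a multiple of
 * group_size.
 */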
template <typename T>
inline void PipeGroupBroadcast(int rank,
int world_size,
int group_root,
int group_size,
const std::vector<std::array<int, 2>>& pipes,
T* data)
{
WHOLEMEMORY_CHECK(world_size % group_size == 0);
if (rank % group_size == group_root) {
for (int i = rank - group_root; i < rank - group_root + group_size; i++) {
auto wret = write(pipes[i][1], data, sizeof(T));
if (wret != sizeof(T)) { WHOLEMEMORY_FATAL("write to pipe failed."); }
}
}
auto rret = read(pipes[rank][0], data, sizeof(T));
if (rret != sizeof(T)) { WHOLEMEMORY_FATAL("read to pipe failed."); }
}
class SideBandCommunicator;
SideBandCommunicator* StartSidebandCommunicator(int world_rank,
int world_size,
const char* server_addr,
int port);
void SideBandAllToAll(SideBandCommunicator* side_band_communicator,
const void* input,
void* output,
size_t element_size);
void SideBandAllGather(SideBandCommunicator* side_band_communicator,
const void* input,
void* output,
size_t element_size);
void SideBandBroadcast(SideBandCommunicator* side_band_communicator,
void* data,
size_t element_size,
int root_rank);
void ShutDownSidebandCommunicator(SideBandCommunicator* side_band_communicator);
int ForkGetDeviceCount();
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/cuda_macros.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_macros.hpp"
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <cuda_runtime_api.h>
namespace wholememory {
static bool s_debug_sync_mode = false;
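// Runs at shared-library load time (GCC/Clang constructor attribute): enables debug
// synchronization when the WM_DEBUG_SYNC environment variable is "1", "on" or "true"
// (case-insensitive).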
__attribute__((constructor)) static void ReadDebugSyncModeFromEnv()
{
try {
char* debug_sync_env_str = std::getenv("WM_DEBUG_SYNC");
if (debug_sync_env_str != nullptr) {
std::string str = debug_sync_env_str;
std::transform(
str.begin(), str.end(), str.begin(), [](unsigned char c) { return std::tolower(c); });
if (str == "1" || str == "on" || str == "true") {
printf("[Notice] Enabled Debug Sync, performance may suffer.\n");
s_debug_sync_mode = true;
}
}
} catch (...) {
return;
}
}
void set_debug_sync_mode(bool debug_sync_mode) { s_debug_sync_mode = debug_sync_mode; }
void debug_synchronize(const char* filename, int line, cudaStream_t stream)
{
if (s_debug_sync_mode) {
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess) {
printf("CUDA cudaGetLastError() failed at file=%s line=%d failed with %s\n",
filename,
line,
cudaGetErrorString(status));
abort();
}
status = cudaStreamSynchronize(stream);
if (status != cudaSuccess) {
printf("CUDA cudaStreamSynchronize() failed at file=%s line=%d failed with %s\n",
filename,
line,
cudaGetErrorString(status));
abort();
}
}
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/src/error.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include <raft/core/error.hpp>
namespace wholememory {
/**
* @brief Exception thrown when logical precondition is violated.
*
* This exception should not be thrown directly and is instead thrown by the
* WHOLEMEMORY_EXPECTS and WHOLEMEMORY_FAIL macros.
*
*/
struct logic_error : public raft::exception {
explicit logic_error(char const* const message) : raft::exception(message) {}
explicit logic_error(std::string const& message) : raft::exception(message) {}
};
} // namespace wholememory
/**
* Macro to append error message to first argument.
* This should only be called in contexts where it is OK to throw exceptions!
*/
#define SET_WHOLEMEMORY_ERROR_MSG(msg, location_prefix, fmt, ...) \
do { \
int const size1 = std::snprintf(nullptr, 0, "%s", location_prefix); \
int const size2 = std::snprintf(nullptr, 0, "file=%s line=%d: ", __FILE__, __LINE__); \
int const size3 = std::snprintf(nullptr, 0, fmt, ##__VA_ARGS__); \
if (size1 < 0 || size2 < 0 || size3 < 0) { \
(void)printf("Error in snprintf, cannot handle raft exception.\n"); \
(void)fflush(stdout); \
abort(); \
} \
auto size = size1 + size2 + size3 + 1; /* +1 for final '\0' */ \
std::vector<char> buf(size); \
(void)std::snprintf(buf.data(), size1 + 1 /* +1 for '\0' */, "%s", location_prefix); \
(void)std::snprintf( \
buf.data() + size1, size2 + 1 /* +1 for '\0' */, "file=%s line=%d: ", __FILE__, __LINE__); \
(void)std::snprintf( \
buf.data() + size1 + size2, size3 + 1 /* +1 for '\0' */, fmt, ##__VA_ARGS__); \
msg += std::string(buf.data(), buf.data() + size - 1); /* -1 to remove final '\0' */ \
} while (0)
/**
* @brief Macro for checking (pre-)conditions that throws an exception when a condition is false
*
* @param[in] cond Expression that evaluates to true or false
* @param[in] fmt String literal description of the reason that cond is expected to be true with
 * optional format tags
* @throw wholememory::logic_error if the condition evaluates to false.
*/
#define WHOLEMEMORY_EXPECTS(cond, fmt, ...) \
do { \
if (!(cond)) { \
std::string error_msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(error_msg, "WholeMemory failure at ", fmt, ##__VA_ARGS__); \
throw wholememory::logic_error(error_msg); \
} \
} while (0)
/**
* @brief Macro for checking (pre-)conditions that abort when a condition is false
*
* @param[in] cond Expression that evaluates to true or false
* @param[in] fmt String literal description of the reason that cond is expected to be true with
 * optional format tags
*/
#define WHOLEMEMORY_EXPECTS_NOTHROW(cond, fmt, ...) \
do { \
if (!(cond)) { \
std::string error_msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(error_msg, "WholeMemory failure at ", fmt, ##__VA_ARGS__); \
(void)printf("%s\n", error_msg.c_str()); \
(void)fflush(stdout); \
abort(); \
} \
} while (0)
/**
* @brief Indicates that an erroneous code path has been taken.
*
* @param[in] fmt String literal description of the reason that this code path is erroneous with
 * optional format tags
* @throw always throws wholememory::logic_error
*/
#define WHOLEMEMORY_FAIL(fmt, ...) \
do { \
std::string error_msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(error_msg, "WholeMemory failure at ", fmt, ##__VA_ARGS__); \
throw wholememory::logic_error(error_msg); \
} while (0)
/**
* @brief Indicates that an erroneous code path has been taken.
*
* @param[in] fmt String literal description of the reason that this code path is erroneous with
 * optional format tags, this macro will not throw exceptions but abort the process.
*/
#define WHOLEMEMORY_FAIL_NOTHROW(fmt, ...) \
do { \
std::string error_msg{}; \
SET_WHOLEMEMORY_ERROR_MSG(error_msg, "WholeMemory failure at ", fmt, ##__VA_ARGS__); \
(void)printf("%s\n", error_msg.c_str()); \
(void)fflush(stdout); \
abort(); \
} while (0)
/**
 * @brief Macro for checking a boolean expression; throws when the expression is false.
 *
 * @param[in] X boolean expression to check
 * @throw wholememory::logic_error if X evaluates to false
*/
#define WHOLEMEMORY_CHECK(X) \
do { \
if (!(X)) { WHOLEMEMORY_FAIL("File %s, line %d, %s check failed.", __FILE__, __LINE__, #X); } \
} while (0)
/**
 * @brief Macro for checking a boolean expression; prints an error and aborts when it is false.
*
* @param[in] X boolean expression to check
*/
#define WHOLEMEMORY_CHECK_NOTHROW(X) \
do { \
if (!(X)) { \
WHOLEMEMORY_FAIL_NOTHROW("File %s, line %d, %s check failed.", __FILE__, __LINE__, #X); \
} \
} while (0)
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/wholememory_test_op.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <wholememory/wholememory_op.h>
#include "cuda_macros.hpp"
#include "error.hpp"
#include "logger.hpp"
#include "output_memory_handle.hpp"
#include "register.hpp"
#include "temp_memory_handle.hpp"
template <typename DataTypeT>
__global__ void EnvTestTempFUnc(const DataTypeT* input_ptr,
DataTypeT* output_ptr,
int64_t emb_dim,
int64_t output_stride)
{
int id = blockIdx.x;
output_ptr += output_stride * id;
float f_id = static_cast<float>(id);
for (int idx = threadIdx.x; idx < emb_dim; idx += blockDim.x) {
output_ptr[idx] = static_cast<DataTypeT>(f_id) + input_ptr[idx];
}
}
template <typename DataTypeT>
void EnvTestTempFunc(const void* input_ptr,
void* out_ptr,
int64_t emb_dim,
int64_t entry_count,
int64_t output_stride,
cudaStream_t stream)
{
int thread_count = std::min<int>(emb_dim, 512);
int block_count = entry_count;
EnvTestTempFUnc<DataTypeT>
<<<block_count, thread_count, 0, stream>>>(static_cast<const DataTypeT*>(input_ptr),
static_cast<DataTypeT*>(out_ptr),
emb_dim,
output_stride);
WM_CUDA_CHECK_NO_THROW(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(EnvTestTempFunc, EnvTestTempFunc, ALLSINT_ALLFLOAT)
#ifdef __cplusplus
extern "C" {
#endif
wholememory_error_code_t wholememory_env_test_op(wholememory_tensor_t input_tensor,
wholememory_tensor_t output_fixed_tensor,
void* output_variable_device_tensor_handle,
void* output_variable_pinned_tensor_handle,
void* output_variable_host_tensor_handle,
int64_t output_variable_entry_count,
wholememory_env_func_t* p_env_fns,
void* stream)
{
auto* input_desc = wholememory_tensor_get_tensor_description(input_tensor);
auto* output_desc = wholememory_tensor_get_tensor_description(output_fixed_tensor);
WHOLEMEMORY_CHECK_NOTHROW(input_desc->dim == 1);
int64_t emb_dim = input_desc->sizes[0];
WHOLEMEMORY_CHECK_NOTHROW(output_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(output_desc->sizes[0] == output_variable_entry_count);
WHOLEMEMORY_CHECK_NOTHROW(output_desc->sizes[1] == emb_dim);
WHOLEMEMORY_CHECK_NOTHROW(input_desc->dtype == output_desc->dtype);
wholememory_ops::output_memory_handle out_device_handle(p_env_fns,
output_variable_device_tensor_handle);
wholememory_ops::output_memory_handle out_pinned_handle(p_env_fns,
output_variable_pinned_tensor_handle);
wholememory_ops::output_memory_handle out_host_handle(p_env_fns,
output_variable_host_tensor_handle);
wholememory_ops::temp_memory_handle temp_buffer_handle(p_env_fns);
// fprintf(stderr, "===> IN OP start allocate temp device ptr.\n");
void* temp_buffer_ptr =
temp_buffer_handle.device_malloc(output_variable_entry_count * emb_dim, input_desc->dtype);
// fprintf(stderr, "===> IN OP temp device allocated=%ld\n",
// reinterpret_cast<int64_t>(temp_buffer_ptr));
size_t output_size =
output_variable_entry_count * emb_dim * wholememory_dtype_get_element_size(input_desc->dtype);
cudaStream_t cuda_stream = static_cast<cudaStream_t>(stream);
// fprintf(stderr, "===> IN OP start computing.\n");
DISPATCH_ONE_TYPE(input_desc->dtype,
EnvTestTempFunc,
wholememory_tensor_get_data_pointer(input_tensor),
temp_buffer_ptr,
emb_dim,
output_variable_entry_count,
output_desc->strides[0],
cuda_stream);
// fprintf(stderr, "===> IN OP compute done.\n");
// fprintf(stderr, "===> IN OP start allocate output device ptr.\n");
void* output_device_ptr = nullptr;
if (output_variable_device_tensor_handle != nullptr) {
output_device_ptr = out_device_handle.device_malloc(output_desc);
}
// fprintf(stderr, "===> IN OP Output device allocated=%ld\n",
// reinterpret_cast<int64_t>(output_device_ptr));
// fprintf(stderr, "===> IN OP start allocate output pinned ptr.\n");
void* output_pinned_ptr = nullptr;
if (output_variable_pinned_tensor_handle != nullptr) {
output_pinned_ptr = out_pinned_handle.pinned_malloc(output_desc);
}
// fprintf(stderr, "===> IN OP Output pinned allocated=%ld\n",
// reinterpret_cast<int64_t>(output_pinned_ptr));
// fprintf(stderr, "===> IN OP start allocate output host ptr.\n");
void* output_host_ptr = nullptr;
if (output_variable_host_tensor_handle != nullptr) {
output_host_ptr = out_host_handle.host_malloc(output_desc);
}
// fprintf(stderr, "===> IN OP Output host allocated=%ld\n",
// reinterpret_cast<int64_t>(output_host_ptr));
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(wholememory_tensor_get_data_pointer(output_fixed_tensor),
temp_buffer_ptr,
output_size,
cudaMemcpyDefault,
cuda_stream));
if (output_device_ptr)
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(
output_device_ptr, temp_buffer_ptr, output_size, cudaMemcpyDefault, cuda_stream));
if (output_pinned_ptr)
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(
output_pinned_ptr, temp_buffer_ptr, output_size, cudaMemcpyDefault, cuda_stream));
if (output_host_ptr)
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(
output_host_ptr, temp_buffer_ptr, output_size, cudaMemcpyDefault, cuda_stream));
WM_CUDA_DEBUG_SYNC_STREAM(static_cast<cudaStream_t>(stream));
// fprintf(stderr, "===> IN OP all done.\n");
return WHOLEMEMORY_SUCCESS;
}
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/scatter_op_impl_mapped.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include "wholememory_ops/functions/gather_scatter_func.h"
namespace wholememory_ops {
wholememory_error_code_t wholememory_scatter_mapped(
void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t wholememory_gref,
wholememory_matrix_description_t wholememory_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
return scatter_func(
input, input_desc, indices, indices_desc, wholememory_gref, wholememory_desc, stream);
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/gather_op_impl_nccl.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/memory_handle.hpp"
#include "wholememory_ops/functions/bucket_ids_func.h"
#include "wholememory_ops/functions/exchange_embeddings_nccl_func.h"
#include "wholememory_ops/functions/exchange_ids_nccl_func.h"
#include "wholememory_ops/functions/gather_scatter_func.h"
#include "wholememory_ops/temp_memory_handle.hpp"
#include "wholememory_ops/thrust_allocator.hpp"
namespace wholememory_ops {
wholememory_error_code_t wholememory_gather_nccl(wholememory_handle_t wholememory_handle,
wholememory_matrix_description_t wholememory_desc,
void* indices,
wholememory_array_description_t indice_desc,
void* output,
wholememory_matrix_description_t output_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
try {
if (wholememory_desc.storage_offset < 0 ||
wholememory_desc.storage_offset + wholememory_desc.sizes[1] > wholememory_desc.stride) {
return WHOLEMEMORY_INVALID_INPUT;
}
wm_thrust_allocator thrust_allocator(p_env_fns);
size_t embedding_size_per_rank;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_get_partition_plan(&embedding_size_per_rank, wholememory_handle));
size_t element_size = wholememory_dtype_get_element_size(wholememory_desc.dtype);
size_t embedding_entry_size = element_size * wholememory_desc.stride;
WHOLEMEMORY_EXPECTS_NOTHROW(
embedding_size_per_rank % embedding_entry_size == 0,
"embedding_size_per_rank=%ld is not multiple of embedding_entry_size=%ldx%ld",
embedding_size_per_rank,
element_size,
wholememory_desc.stride);
size_t embedding_entry_count_per_rank = embedding_size_per_rank / embedding_entry_size;
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&wm_comm, wholememory_handle));
int world_size;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
temp_memory_handle host_rank_id_count(p_env_fns), host_recv_rank_id_count(p_env_fns);
int64_t* host_rank_id_count_ptr =
static_cast<int64_t*>(host_rank_id_count.host_malloc(world_size, WHOLEMEMORY_DT_INT64));
int64_t* host_recv_rank_id_count_ptr =
static_cast<int64_t*>(host_recv_rank_id_count.host_malloc(world_size, WHOLEMEMORY_DT_INT64));
temp_memory_handle dev_recv_indice_buffer(p_env_fns);
temp_memory_handle dev_raw_indice(p_env_fns);
int64_t* dev_raw_indice_ptr =
static_cast<int64_t*>(dev_raw_indice.device_malloc(indice_desc.size, WHOLEMEMORY_DT_INT64));
int64_t total_recv_count = 0;
WHOLEMEMORY_RETURN_ON_FAIL(bucket_and_exchange_ids_func(indices,
indice_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
&dev_recv_indice_buffer,
dev_raw_indice_ptr,
embedding_entry_count_per_rank,
wm_comm,
&thrust_allocator,
p_env_fns,
stream));
// Local Gather
for (int i = 0; i < world_size; i++) {
total_recv_count += host_recv_rank_id_count_ptr[i];
}
size_t local_mem_offset, local_mem_size;
temp_memory_handle dev_local_gather_buffer(p_env_fns);
temp_memory_handle dev_embedding_recv_buffer(p_env_fns);
void* dev_local_gather_buffer_ptr = dev_local_gather_buffer.device_malloc(
wholememory_desc.sizes[1] * total_recv_count, output_desc.dtype);
void* dev_embedding_recv_buffer_ptr = dev_embedding_recv_buffer.device_malloc(
wholememory_desc.sizes[1] * indice_desc.size, output_desc.dtype);
void* local_fake_ptr = nullptr;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_local_memory(
&local_fake_ptr, &local_mem_size, &local_mem_offset, wholememory_handle));
local_fake_ptr = static_cast<char*>(local_fake_ptr) - local_mem_offset;
wholememory_gref_t local_fake_gref =
wholememory_create_continuous_global_reference(local_fake_ptr);
int64_t local_buffer_size[2] = {total_recv_count, wholememory_desc.sizes[1]};
wholememory_matrix_description_t local_gather_buffer_desc = wholememory_create_matrix_desc(
local_buffer_size, wholememory_desc.sizes[1], 0, output_desc.dtype);
auto dev_recv_indice_desc =
wholememory_create_array_desc(total_recv_count, 0, indice_desc.dtype);
WHOLEMEMORY_RETURN_ON_FAIL(gather_func(local_fake_gref,
wholememory_desc,
dev_recv_indice_buffer.pointer(),
dev_recv_indice_desc,
dev_local_gather_buffer_ptr,
local_gather_buffer_desc,
stream));
// AllToAllV for embeddings
size_t embedding_size =
wholememory_desc.sizes[1] * wholememory_dtype_get_element_size(output_desc.dtype);
WHOLEMEMORY_RETURN_ON_FAIL(exchange_embeddings_nccl_func(dev_local_gather_buffer_ptr,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
dev_embedding_recv_buffer_ptr,
embedding_size,
wm_comm,
stream));
// Local reorder
int64_t total_need_indice_count = 0;
for (int i = 0; i < world_size; i++) {
total_need_indice_count += host_rank_id_count_ptr[i];
}
wholememory_gref_t output_gref = wholememory_create_continuous_global_reference(output);
wholememory_matrix_description_t local_recv_buffer_desc =
wholememory_create_matrix_desc(output_desc.sizes, output_desc.sizes[1], 0, output_desc.dtype);
local_recv_buffer_desc.sizes[0] = total_need_indice_count;
auto raw_indice_desc =
wholememory_create_array_desc(total_need_indice_count, 0, WHOLEMEMORY_DT_INT64);
WHOLEMEMORY_RETURN_ON_FAIL(scatter_func(dev_embedding_recv_buffer_ptr,
local_recv_buffer_desc,
dev_raw_indice_ptr,
raw_indice_desc,
output_gref,
output_desc,
stream));
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_CHECK(cudaStreamSynchronize(stream));
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("CUDA logic Error %s\n", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/thrust_allocator.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "thrust_allocator.hpp"
#include "error.hpp"
#include "wholememory/integer_utils.hpp"
namespace wholememory_ops {
wm_thrust_allocator::~wm_thrust_allocator() { deallocate_all(); }
wm_thrust_allocator::value_type* wm_thrust_allocator::allocate(std::ptrdiff_t mem_size)
{
static const std::ptrdiff_t kThrustAlignSize = 256;
mem_size = std::max<std::ptrdiff_t>(kThrustAlignSize, mem_size);
mem_size = wholememory::div_rounding_up_unsafe(mem_size, kThrustAlignSize) * kThrustAlignSize;
void* memory_context = nullptr;
fns->temporary_fns.create_memory_context_fn(&memory_context, fns->temporary_fns.global_context);
wholememory_tensor_description_t tensor_description;
wholememory_initialize_tensor_desc(&tensor_description);
tensor_description.dim = 1;
tensor_description.dtype = WHOLEMEMORY_DT_INT64;
tensor_description.sizes[0] = mem_size / sizeof(int64_t);
auto* ptr = static_cast<value_type*>(fns->temporary_fns.malloc_fn(
&tensor_description, WHOLEMEMORY_MA_DEVICE, memory_context, fns->temporary_fns.global_context));
mem_ptr_to_context_map.emplace(ptr, memory_context);
return ptr;
}
void wm_thrust_allocator::deallocate(value_type* p, size_t /*mem_size*/)
{
auto it = mem_ptr_to_context_map.find(p);
WHOLEMEMORY_CHECK_NOTHROW(it != mem_ptr_to_context_map.end());
fns->temporary_fns.free_fn(it->second, fns->temporary_fns.global_context);
fns->temporary_fns.destroy_memory_context_fn(it->second, fns->temporary_fns.global_context);
mem_ptr_to_context_map.erase(p);
}
void wm_thrust_allocator::deallocate_all()
{
while (!mem_ptr_to_context_map.empty()) {
auto it = mem_ptr_to_context_map.begin();
fns->temporary_fns.free_fn(it->second, fns->temporary_fns.global_context);
fns->temporary_fns.destroy_memory_context_fn(it->second, fns->temporary_fns.global_context);
mem_ptr_to_context_map.erase(it->first);
}
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/scatter_op_impl.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/global_reference.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
wholememory_error_code_t wholememory_scatter_mapped(
void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t wholememory_gref,
wholememory_matrix_description_t wholememory_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
wholememory_error_code_t wholememory_scatter_nccl(void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_handle_t wholememory_handle,
wholememory_matrix_description_t wholememory_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/output_memory_handle.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/tensor_description.h>
namespace wholememory_ops {
class output_memory_handle {
public:
explicit output_memory_handle(wholememory_env_func_t* env_fns, void* memory_context)
{
output_mem_fns_ = &env_fns->output_fns;
memory_context_ = memory_context;
}
output_memory_handle() = delete;
~output_memory_handle() {}
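  // Note: the empty destructor above is intentional. Unlike temp_memory_handle, this class
  // never frees what it allocates; the allocation is owned by the caller-provided
  // memory_context_, so its lifetime is managed by the caller (typically as operator output).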
void* device_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = output_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_DEVICE, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* device_malloc(wholememory_tensor_description_t* tensor_desc)
{
ptr_ = output_mem_fns_->malloc_fn(
tensor_desc, WHOLEMEMORY_MA_DEVICE, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* host_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = output_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_HOST, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* host_malloc(wholememory_tensor_description_t* tensor_desc)
{
ptr_ = output_mem_fns_->malloc_fn(
tensor_desc, WHOLEMEMORY_MA_HOST, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* pinned_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = output_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_PINNED, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* pinned_malloc(wholememory_tensor_description_t* tensor_desc)
{
ptr_ = output_mem_fns_->malloc_fn(
tensor_desc, WHOLEMEMORY_MA_PINNED, memory_context_, output_mem_fns_->global_context);
return ptr_;
}
void* pointer() const { return ptr_; }
private:
static void get_tensor_description(wholememory_tensor_description_t* tensor_description,
size_t elt_count,
wholememory_dtype_t data_type)
{
wholememory_initialize_tensor_desc(tensor_description);
tensor_description->dim = 1;
tensor_description->storage_offset = 0;
tensor_description->dtype = data_type;
tensor_description->sizes[0] = elt_count;
tensor_description->strides[0] = 1;
}
wholememory_output_memory_func_t* output_mem_fns_ = nullptr;
void* memory_context_ = nullptr;
void* ptr_ = nullptr;
};
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/scatter_op_impl_nccl.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory/memory_handle.hpp"
#include "wholememory_ops/functions/bucket_ids_func.h"
#include "wholememory_ops/functions/exchange_embeddings_nccl_func.h"
#include "wholememory_ops/functions/exchange_ids_nccl_func.h"
#include "wholememory_ops/functions/gather_scatter_func.h"
#include "wholememory_ops/temp_memory_handle.hpp"
#include "wholememory_ops/thrust_allocator.hpp"
namespace wholememory_ops {
wholememory_error_code_t wholememory_scatter_nccl(void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_handle_t wholememory_handle,
wholememory_matrix_description_t wholememory_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
try {
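    // Distributed (NCCL) scatter mirrors the gather pipeline:
    //   1. Bucket the destination indices by owning rank and exchange them (all-to-all).
    //   2. Reorder the caller's input rows with the raw index mapping so rows headed to
    //      the same rank become contiguous (implemented as a gather from the input buffer).
    //   3. Exchange the reordered rows (all-to-all-v) so each rank receives the rows
    //      that belong to its local partition.
    //   4. Scatter the received rows into local WholeMemory at the received indices.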
if (wholememory_desc.storage_offset < 0 ||
wholememory_desc.storage_offset + wholememory_desc.sizes[1] > wholememory_desc.stride) {
WHOLEMEMORY_ERROR("invalid input offset=%ld, size[1]=%ld, stride=%ld\n",
wholememory_desc.storage_offset,
wholememory_desc.sizes[1],
wholememory_desc.stride);
return WHOLEMEMORY_INVALID_INPUT;
}
wm_thrust_allocator thrust_allocator(p_env_fns);
size_t embedding_size_per_rank;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_get_partition_plan(&embedding_size_per_rank, wholememory_handle));
size_t element_size = wholememory_dtype_get_element_size(wholememory_desc.dtype);
size_t embedding_entry_size = element_size * wholememory_desc.stride;
WHOLEMEMORY_EXPECTS_NOTHROW(
embedding_size_per_rank % embedding_entry_size == 0,
"embedding_size_per_rank=%ld is not multiple of embedding_entry_size=%ldx%ld",
embedding_size_per_rank,
element_size,
wholememory_desc.stride);
size_t embedding_entry_count_per_rank = embedding_size_per_rank / embedding_entry_size;
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&wm_comm, wholememory_handle));
int world_size;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
temp_memory_handle host_rank_id_count(p_env_fns), host_recv_rank_id_count(p_env_fns);
int64_t* host_rank_id_count_ptr =
static_cast<int64_t*>(host_rank_id_count.host_malloc(world_size, WHOLEMEMORY_DT_INT64));
int64_t* host_recv_rank_id_count_ptr =
static_cast<int64_t*>(host_recv_rank_id_count.host_malloc(world_size, WHOLEMEMORY_DT_INT64));
temp_memory_handle dev_recv_indice_buffer(p_env_fns);
temp_memory_handle dev_raw_indice(p_env_fns);
int64_t* dev_raw_indice_ptr =
static_cast<int64_t*>(dev_raw_indice.device_malloc(indices_desc.size, WHOLEMEMORY_DT_INT64));
int64_t total_recv_count = 0;
WHOLEMEMORY_RETURN_ON_FAIL(bucket_and_exchange_ids_func(indices,
indices_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
&dev_recv_indice_buffer,
dev_raw_indice_ptr,
embedding_entry_count_per_rank,
wm_comm,
&thrust_allocator,
p_env_fns,
stream));
// Local Reorder
for (int i = 0; i < world_size; i++) {
total_recv_count += host_recv_rank_id_count_ptr[i];
}
temp_memory_handle dev_local_reorder_buffer(p_env_fns), dev_embedding_recv_buffer(p_env_fns);
auto local_reorder_desc =
wholememory_create_matrix_desc(input_desc.sizes, input_desc.sizes[1], 0, input_desc.dtype);
void* dev_local_reorder_buffer_ptr = dev_local_reorder_buffer.device_malloc(
wholememory_get_memory_element_count_from_matrix(&local_reorder_desc), input_desc.dtype);
wholememory_gref_t input_gref = wholememory_create_continuous_global_reference(input);
auto dev_raw_indice_desc =
wholememory_create_array_desc(indices_desc.size, 0, WHOLEMEMORY_DT_INT64);
WHOLEMEMORY_RETURN_ON_FAIL(gather_func(input_gref,
input_desc,
dev_raw_indice_ptr,
dev_raw_indice_desc,
dev_local_reorder_buffer_ptr,
local_reorder_desc,
stream));
// AllToAllV for embeddings
void* dev_embedding_recv_buffer_ptr = dev_embedding_recv_buffer.device_malloc(
total_recv_count * input_desc.sizes[1], input_desc.dtype);
size_t embedding_size =
wholememory_desc.sizes[1] * wholememory_dtype_get_element_size(input_desc.dtype);
WHOLEMEMORY_RETURN_ON_FAIL(exchange_embeddings_nccl_func(dev_local_reorder_buffer_ptr,
host_rank_id_count_ptr,
host_recv_rank_id_count_ptr,
dev_embedding_recv_buffer_ptr,
embedding_size,
wm_comm,
stream));
// Local scatter
size_t local_mem_offset, local_mem_size;
void* local_fake_ptr = nullptr;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_local_memory(
&local_fake_ptr, &local_mem_size, &local_mem_offset, wholememory_handle));
local_fake_ptr = static_cast<char*>(local_fake_ptr) - local_mem_offset;
wholememory_gref_t local_fake_embedding_gref =
wholememory_create_continuous_global_reference(local_fake_ptr);
std::vector<int64_t> recv_embedding_sizes = {total_recv_count, input_desc.sizes[1]};
wholememory_matrix_description_t recv_embedding_desc = wholememory_create_matrix_desc(
recv_embedding_sizes.data(), input_desc.sizes[1], 0, input_desc.dtype);
auto recv_indices_desc = wholememory_create_array_desc(total_recv_count, 0, indices_desc.dtype);
WHOLEMEMORY_RETURN_ON_FAIL(scatter_func(dev_embedding_recv_buffer_ptr,
recv_embedding_desc,
dev_recv_indice_buffer.pointer(),
recv_indices_desc,
local_fake_embedding_gref,
wholememory_desc,
stream));
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_CHECK(cudaStreamSynchronize(stream));
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("CUDA logic Error %s\n", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("Unknown Error\n");
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/scatter_op.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wholememory/wholememory_op.h>
#include <wholememory_ops/scatter_op_impl.h>
#include "error.hpp"
#include "logger.hpp"
wholememory_error_code_t wholememory_scatter(wholememory_tensor_t input_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t wholememory_tensor,
wholememory_env_func_t* p_env_fns,
void* stream)
{
bool const has_handle = wholememory_tensor_has_handle(wholememory_tensor);
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_NONE;
if (has_handle) {
memory_type =
wholememory_get_memory_type(wholememory_tensor_get_memory_handle(wholememory_tensor));
}
wholememory_matrix_description_t matrix_description;
auto tensor_description = *wholememory_tensor_get_tensor_description(wholememory_tensor);
if (tensor_description.dim != 1 && tensor_description.dim != 2) {
WHOLEMEMORY_ERROR("wholememory_tensor should be 1D or 2D tensor.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description.dim == 1) {
if (!wholememory_unsqueeze_tensor(&tensor_description, 1)) {
WHOLEMEMORY_ERROR("Output 1D wholememory_tensor unsqueeze to 2D failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
}
if (!wholememory_convert_tensor_desc_to_matrix(&matrix_description, &tensor_description)) {
WHOLEMEMORY_ERROR("Output wholememory_tensor convert to matrix failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (wholememory_tensor_get_tensor_description(indices_tensor)->dim != 1) {
WHOLEMEMORY_ERROR("indices tensor should be 1D tensor");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_tensor_description_t input_tensor_desc =
*wholememory_tensor_get_tensor_description(input_tensor);
if (input_tensor_desc.dim != tensor_description.dim) {
WHOLEMEMORY_ERROR("input tensor should be same dim as wholememory_tensor.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (input_tensor_desc.dim == 1) {
if (!wholememory_unsqueeze_tensor(&input_tensor_desc, 1)) {
WHOLEMEMORY_ERROR("Input 1D wholememory_tensor unsqueeze to 2D failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
}
void* indices = wholememory_tensor_get_data_pointer(indices_tensor);
void* input = wholememory_tensor_get_data_pointer(input_tensor);
wholememory_array_description_t indices_desc;
wholememory_matrix_description_t input_desc;
if (!wholememory_convert_tensor_desc_to_array(
&indices_desc, wholememory_tensor_get_tensor_description(indices_tensor))) {
WHOLEMEMORY_ERROR("Convert indices tensor to array failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (!wholememory_convert_tensor_desc_to_matrix(&input_desc, &input_tensor_desc)) {
WHOLEMEMORY_ERROR("Convert input tensor to matrix failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (has_handle && memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
return wholememory_ops::wholememory_scatter_nccl(
input,
input_desc,
indices,
indices_desc,
wholememory_tensor_get_memory_handle(wholememory_tensor),
matrix_description,
p_env_fns,
static_cast<cudaStream_t>(stream));
}
WHOLEMEMORY_EXPECTS_NOTHROW(!has_handle || memory_type == WHOLEMEMORY_MT_CHUNKED ||
memory_type == WHOLEMEMORY_MT_CONTINUOUS,
"Memory type not supported.");
wholememory_gref_t gref;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(wholememory_tensor, &gref));
return wholememory_ops::wholememory_scatter_mapped(input,
input_desc,
indices,
indices_desc,
gref,
matrix_description,
p_env_fns,
static_cast<cudaStream_t>(stream));
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/gather_op_impl_mapped.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include "cuda_macros.hpp"
#include "wholememory_ops/functions/gather_scatter_func.h"
namespace wholememory_ops {
wholememory_error_code_t wholememory_gather_mapped(
wholememory_gref_t wholememory_gref,
wholememory_matrix_description_t wholememory_desc,
void* indices,
wholememory_array_description_t indice_desc,
void* output,
wholememory_matrix_description_t output_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
WHOLEMEMORY_RETURN_ON_FAIL(gather_func(
wholememory_gref, wholememory_desc, indices, indice_desc, output, output_desc, stream));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/temp_memory_handle.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/tensor_description.h>
namespace wholememory_ops {
class temp_memory_handle {
public:
explicit temp_memory_handle(wholememory_env_func_t* env_fns)
{
temp_mem_fns_ = &env_fns->temporary_fns;
temp_mem_fns_->create_memory_context_fn(&memory_context_, temp_mem_fns_->global_context);
}
temp_memory_handle() = delete;
~temp_memory_handle() { free_memory(); }
void* device_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
free_memory();
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = temp_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_DEVICE, memory_context_, temp_mem_fns_->global_context);
return ptr_;
}
void* host_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
free_memory();
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = temp_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_HOST, memory_context_, temp_mem_fns_->global_context);
return ptr_;
}
void* pinned_malloc(size_t elt_count, wholememory_dtype_t data_type)
{
free_memory();
wholememory_tensor_description_t tensor_description;
get_tensor_description(&tensor_description, elt_count, data_type);
ptr_ = temp_mem_fns_->malloc_fn(
&tensor_description, WHOLEMEMORY_MA_PINNED, memory_context_, temp_mem_fns_->global_context);
return ptr_;
}
[[nodiscard]] void* pointer() const { return ptr_; }
void free_memory()
{
if (ptr_ != nullptr) {
temp_mem_fns_->free_fn(memory_context_, temp_mem_fns_->global_context);
temp_mem_fns_->destroy_memory_context_fn(memory_context_, temp_mem_fns_->global_context);
memory_context_ = nullptr;
ptr_ = nullptr;
}
}
private:
static void get_tensor_description(wholememory_tensor_description_t* tensor_description,
size_t elt_count,
wholememory_dtype_t data_type)
{
wholememory_initialize_tensor_desc(tensor_description);
tensor_description->dim = 1;
tensor_description->storage_offset = 0;
tensor_description->dtype = data_type;
tensor_description->sizes[0] = elt_count;
tensor_description->strides[0] = 1;
}
wholememory_temp_memory_func_t* temp_mem_fns_ = nullptr;
void* memory_context_ = nullptr;
void* ptr_ = nullptr;
};
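// Illustrative sketch (not part of this header): temporary buffers are served by the
// environment functions and released automatically when the handle leaves scope.
// `p_env_fns` and `count` are assumed to be supplied by the surrounding operator.
//
//   temp_memory_handle tmp(p_env_fns);
//   auto* ids = static_cast<int64_t*>(tmp.device_malloc(count, WHOLEMEMORY_DT_INT64));
//   // ... use ids on the stream ...
//   // tmp.free_memory() runs in the destructor.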
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/thrust_allocator.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <map>
#include <wholememory/env_func_ptrs.h>
namespace wholememory_ops {
class wm_thrust_allocator {
public:
using value_type = char;
explicit wm_thrust_allocator(wholememory_env_func_t* fns) : fns(fns) {}
wm_thrust_allocator() = delete;
~wm_thrust_allocator();
value_type* allocate(std::ptrdiff_t mem_size);
void deallocate(value_type* p, size_t mem_size);
void deallocate_all();
wholememory_env_func_t* fns;
std::map<value_type*, void*> mem_ptr_to_context_map;
};
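// Illustrative sketch (not part of this header): the allocator is meant to back a Thrust
// execution policy so temporary storage comes from the WholeMemory environment functions
// instead of raw cudaMalloc. `p_env_fns`, `keys`, `n` and `stream` are assumed to be
// provided by the caller.
//
//   wm_thrust_allocator allocator(p_env_fns);
//   thrust::sort(thrust::cuda::par(allocator).on(stream), keys, keys + n);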
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/register.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <unordered_map>
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <wholememory/tensor_description.h>
#include "error.hpp"
namespace wholememory_ops {
struct one_wmt_hash {
inline std::size_t operator()(const wholememory_dtype_t& k) const
{
return static_cast<size_t>(k);
}
};
struct two_wmt_hash {
inline std::size_t operator()(const std::tuple<wholememory_dtype_t, wholememory_dtype_t>& k) const
{
return static_cast<size_t>(std::get<1>(k)) * (static_cast<size_t>(WHOLEMEMORY_DT_COUNT)) +
static_cast<size_t>(std::get<0>(k));
}
};
struct three_wmt_hash {
inline std::size_t operator()(
const std::tuple<wholememory_dtype_t, wholememory_dtype_t, wholememory_dtype_t>& k) const
{
return static_cast<size_t>(std::get<2>(k)) * (static_cast<size_t>(WHOLEMEMORY_DT_COUNT)) *
(static_cast<size_t>(WHOLEMEMORY_DT_COUNT)) +
static_cast<size_t>(std::get<1>(k)) * (static_cast<size_t>(WHOLEMEMORY_DT_COUNT)) +
static_cast<size_t>(std::get<0>(k));
}
};
} // namespace wholememory_ops
template <typename DataTypeT>
inline wholememory_dtype_t get_wholememory_dtype()
{
WHOLEMEMORY_FAIL_NOTHROW("get_wholememory_dtype type not supported.");
return WHOLEMEMORY_DT_UNKNOWN;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<int8_t>()
{
return WHOLEMEMORY_DT_INT8;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<int16_t>()
{
return WHOLEMEMORY_DT_INT16;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<int32_t>()
{
return WHOLEMEMORY_DT_INT;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<int64_t>()
{
return WHOLEMEMORY_DT_INT64;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<__half>()
{
return WHOLEMEMORY_DT_HALF;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<__nv_bfloat16>()
{
return WHOLEMEMORY_DT_BF16;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<float>()
{
return WHOLEMEMORY_DT_FLOAT;
}
template <>
inline wholememory_dtype_t get_wholememory_dtype<double>()
{
return WHOLEMEMORY_DT_DOUBLE;
}
#define VEC_SINT3264 std::vector<wholememory_dtype_t>({WHOLEMEMORY_DT_INT, WHOLEMEMORY_DT_INT64})
#define VEC_ALLSINT \
std::vector<wholememory_dtype_t>( \
{WHOLEMEMORY_DT_INT8, WHOLEMEMORY_DT_INT16, WHOLEMEMORY_DT_INT, WHOLEMEMORY_DT_INT64})
#define VEC_FLOAT_DOUBLE \
std::vector<wholememory_dtype_t>({WHOLEMEMORY_DT_FLOAT, WHOLEMEMORY_DT_DOUBLE})
#define VEC_HALF_FLOAT std::vector<wholememory_dtype_t>({WHOLEMEMORY_DT_HALF, WHOLEMEMORY_DT_FLOAT})
#define VEC_BF16_HALF_FLOAT \
std::vector<wholememory_dtype_t>({WHOLEMEMORY_DT_BF16, WHOLEMEMORY_DT_HALF, WHOLEMEMORY_DT_FLOAT})
#define VEC_HALF_FLOAT_DOUBLE \
std::vector<wholememory_dtype_t>( \
{WHOLEMEMORY_DT_HALF, WHOLEMEMORY_DT_FLOAT, WHOLEMEMORY_DT_DOUBLE})
#define VEC_ALLFLOAT \
std::vector<wholememory_dtype_t>( \
{WHOLEMEMORY_DT_BF16, WHOLEMEMORY_DT_HALF, WHOLEMEMORY_DT_FLOAT, WHOLEMEMORY_DT_DOUBLE})
#define VEC_ALLSINT_ALLFLOAT \
std::vector<wholememory_dtype_t>({WHOLEMEMORY_DT_INT8, \
WHOLEMEMORY_DT_INT16, \
WHOLEMEMORY_DT_INT, \
WHOLEMEMORY_DT_INT64, \
WHOLEMEMORY_DT_BF16, \
WHOLEMEMORY_DT_HALF, \
WHOLEMEMORY_DT_FLOAT, \
WHOLEMEMORY_DT_DOUBLE})
#define CASES_SINT3264(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_INT: { \
TEMPFUNC_NAME<int32_t, ##__VA_ARGS__>(); \
break; \
} \
case WHOLEMEMORY_DT_INT64: { \
TEMPFUNC_NAME<int64_t, ##__VA_ARGS__>(); \
break; \
}
#define CASES_ALLSINT(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_INT8: { \
TEMPFUNC_NAME<int8_t, ##__VA_ARGS__>(); \
break; \
} \
case WHOLEMEMORY_DT_INT16: { \
TEMPFUNC_NAME<int16_t, ##__VA_ARGS__>(); \
break; \
} \
CASES_SINT3264(TEMPFUNC_NAME, ##__VA_ARGS__)
#define CASES_FLOAT_DOUBLE(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_FLOAT: { \
TEMPFUNC_NAME<float, ##__VA_ARGS__>(); \
break; \
} \
case WHOLEMEMORY_DT_DOUBLE: { \
TEMPFUNC_NAME<double, ##__VA_ARGS__>(); \
break; \
}
#define CASES_HALF_FLOAT(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_HALF: { \
TEMPFUNC_NAME<__half, ##__VA_ARGS__>(); \
break; \
} \
case WHOLEMEMORY_DT_FLOAT: { \
TEMPFUNC_NAME<float, ##__VA_ARGS__>(); \
break; \
}
#define CASES_BF16_HALF_FLOAT(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_BF16: { \
TEMPFUNC_NAME<__nv_bfloat16, ##__VA_ARGS__>(); \
break; \
} \
CASES_HALF_FLOAT(TEMPFUNC_NAME, ##__VA_ARGS__)
#define CASES_HALF_FLOAT_DOUBLE(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_HALF: { \
TEMPFUNC_NAME<__half, ##__VA_ARGS__>(); \
break; \
} \
CASES_FLOAT_DOUBLE(TEMPFUNC_NAME, ##__VA_ARGS__)
#define CASES_ALLFLOAT(TEMPFUNC_NAME, ...) \
case WHOLEMEMORY_DT_BF16: { \
TEMPFUNC_NAME<__nv_bfloat16, ##__VA_ARGS__>(); \
break; \
} \
CASES_HALF_FLOAT_DOUBLE(TEMPFUNC_NAME, ##__VA_ARGS__)
#define CASES_ALLSINT_ALLFLOAT(TEMPFUNC_NAME, ...) \
CASES_ALLSINT(TEMPFUNC_NAME, ##__VA_ARGS__) \
CASES_ALLFLOAT(TEMPFUNC_NAME, ##__VA_ARGS__)
#define REGISTER_DISPATCH_ONE_TYPE(NAME, TEMPFUNC_NAME, ARG0_SET) \
static std::unordered_map<wholememory_dtype_t, \
decltype(&TEMPFUNC_NAME<int>), \
wholememory_ops::one_wmt_hash>* NAME##_dispatch1_map = nullptr; \
template <typename T0> \
void Register##NAME##Map1FuncHelper0() \
{ \
auto key = get_wholememory_dtype<T0>(); \
NAME##_dispatch1_map->emplace(key, TEMPFUNC_NAME<T0>); \
} \
__attribute__((constructor)) static void Register##NAME##Map1Func() \
{ \
NAME##_dispatch1_map = new std::unordered_map<wholememory_dtype_t, \
decltype(&TEMPFUNC_NAME<int>), \
wholememory_ops::one_wmt_hash>(); \
auto arg0_types = VEC_##ARG0_SET; \
for (auto arg0_type : arg0_types) { \
switch (arg0_type) { \
CASES_##ARG0_SET(Register##NAME##Map1FuncHelper0) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type=%d for function %s failed.", \
static_cast<int>(arg0_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
}
#define DISPATCH_ONE_TYPE(WMTypeValue0, NAME, ...) \
do { \
auto key = WMTypeValue0; \
auto it = NAME##_dispatch1_map->find(key); \
WHOLEMEMORY_CHECK_NOTHROW(it != NAME##_dispatch1_map->end()); \
it->second(__VA_ARGS__); \
} while (0)
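// Illustrative usage sketch (hypothetical names, not part of this header):
//
//   template <typename IndexT>
//   void print_first_index_temp_func(const void* indices)
//   {
//     printf("%lld\n", static_cast<long long>(static_cast<const IndexT*>(indices)[0]));
//   }
//
//   REGISTER_DISPATCH_ONE_TYPE(PrintFirstIndex, print_first_index_temp_func, SINT3264)
//
//   void print_first_index(wholememory_dtype_t index_dtype, const void* indices)
//   {
//     DISPATCH_ONE_TYPE(index_dtype, PrintFirstIndex, indices);
//   }
//
// REGISTER_DISPATCH_ONE_TYPE builds a dtype -> template-instantiation map at program start
// (via the constructor attribute); DISPATCH_ONE_TYPE looks up the runtime dtype in that map
// and forwards the remaining arguments to the selected instantiation.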
#define REGISTER_DISPATCH_TWO_TYPES(NAME, TEMPFUNC_NAME, ARG0_SET, ARG1_SET) \
static std::unordered_map<std::tuple<wholememory_dtype_t, wholememory_dtype_t>, \
decltype(&TEMPFUNC_NAME<int, int>), \
wholememory_ops::two_wmt_hash>* NAME##_dispatch2_map = nullptr; \
template <typename T0, typename T1> \
void Register##NAME##Map2FuncHelper0() \
{ \
auto key = std::make_tuple(get_wholememory_dtype<T0>(), get_wholememory_dtype<T1>()); \
NAME##_dispatch2_map->emplace(key, TEMPFUNC_NAME<T0, T1>); \
} \
template <typename T1> \
void Register##NAME##Map2FuncHelper1() \
{ \
auto arg0_types = VEC_##ARG0_SET; \
for (auto arg0_type : arg0_types) { \
switch (arg0_type) { \
CASES_##ARG0_SET(Register##NAME##Map2FuncHelper0, T1) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type0=%d for function %s failed.", \
static_cast<int>(arg0_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
} \
__attribute__((constructor)) static void Register##NAME##Map2Func() \
{ \
NAME##_dispatch2_map = \
new std::unordered_map<std::tuple<wholememory_dtype_t, wholememory_dtype_t>, \
decltype(&TEMPFUNC_NAME<int, int>), \
wholememory_ops::two_wmt_hash>(); \
auto arg1_types = VEC_##ARG1_SET; \
for (auto arg1_type : arg1_types) { \
switch (arg1_type) { \
CASES_##ARG1_SET(Register##NAME##Map2FuncHelper1) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type1=%d for function %s failed.", \
static_cast<int>(arg1_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
}
#define DISPATCH_TWO_TYPES(WMTypeValue0, WMTypeValue1, NAME, ...) \
do { \
auto key = std::make_tuple(WMTypeValue0, WMTypeValue1); \
auto it = NAME##_dispatch2_map->find(key); \
WHOLEMEMORY_CHECK_NOTHROW(it != NAME##_dispatch2_map->end()); \
it->second(__VA_ARGS__); \
} while (0)
#define REGISTER_DISPATCH_THREE_TYPES(NAME, TEMPFUNC_NAME, ARG0_SET, ARG1_SET, ARG2_SET) \
static std::unordered_map< \
std::tuple<wholememory_dtype_t, wholememory_dtype_t, wholememory_dtype_t>, \
decltype(&TEMPFUNC_NAME<int, int, int>), \
wholememory_ops::three_wmt_hash>* NAME##_dispatch3_map = nullptr; \
template <typename T0, typename T1, typename T2> \
void Register##NAME##Map3FuncHelper0() \
{ \
auto key = std::make_tuple( \
get_wholememory_dtype<T0>(), get_wholememory_dtype<T1>(), get_wholememory_dtype<T2>()); \
NAME##_dispatch3_map->emplace(key, TEMPFUNC_NAME<T0, T1, T2>); \
} \
template <typename T1, typename T2> \
void Register##NAME##Map3FuncHelper1() \
{ \
auto arg0_types = VEC_##ARG0_SET; \
for (auto arg0_type : arg0_types) { \
switch (arg0_type) { \
CASES_##ARG0_SET(Register##NAME##Map3FuncHelper0, T1, T2) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type0=%d for function %s failed.", \
static_cast<int>(arg0_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
} \
template <typename T2> \
void Register##NAME##Map3FuncHelper2() \
{ \
auto arg1_types = VEC_##ARG1_SET; \
for (auto arg1_type : arg1_types) { \
switch (arg1_type) { \
CASES_##ARG1_SET(Register##NAME##Map3FuncHelper1, T2) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type1=%d for function %s failed.", \
static_cast<int>(arg1_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
} \
__attribute__((constructor)) static void Register##NAME##Map3Func() \
{ \
NAME##_dispatch3_map = new std::unordered_map< \
std::tuple<wholememory_dtype_t, wholememory_dtype_t, wholememory_dtype_t>, \
decltype(&TEMPFUNC_NAME<int, int, int>), \
wholememory_ops::three_wmt_hash>(); \
auto arg2_types = VEC_##ARG2_SET; \
for (auto arg2_type : arg2_types) { \
switch (arg2_type) { \
CASES_##ARG2_SET(Register##NAME##Map3FuncHelper2) default: \
{ \
WHOLEMEMORY_FAIL_NOTHROW("dispatch with type2=%d for function %s failed.", \
static_cast<int>(arg2_type), \
#TEMPFUNC_NAME); \
break; \
} \
} \
} \
}
#define DISPATCH_THREE_TYPES(WMTypeValue0, WMTypeValue1, WMTypeValue2, NAME, ...) \
do { \
auto key = std::make_tuple(WMTypeValue0, WMTypeValue1, WMTypeValue2); \
auto it = NAME##_dispatch3_map->find(key); \
WHOLEMEMORY_CHECK_NOTHROW(it != NAME##_dispatch3_map->end()); \
it->second(__VA_ARGS__); \
} while (0)
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/gather_op_impl.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/global_reference.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
wholememory_error_code_t wholememory_gather_mapped(
wholememory_gref_t wholememory_gref,
wholememory_matrix_description_t wholememory_desc,
void* indices,
wholememory_array_description_t indice_desc,
void* output,
wholememory_matrix_description_t output_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
wholememory_error_code_t wholememory_gather_nccl(wholememory_handle_t wholememory_handle,
wholememory_matrix_description_t wholememory_desc,
void* indices,
wholememory_array_description_t indice_desc,
void* output,
wholememory_matrix_description_t output_desc,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/gather_op.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wholememory/wholememory_op.h>
#include <wholememory_ops/gather_op_impl.h>
#include "error.hpp"
#include "logger.hpp"
wholememory_error_code_t wholememory_gather(wholememory_tensor_t wholememory_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t output_tensor,
wholememory_env_func_t* p_env_fns,
void* stream)
{
bool const has_handle = wholememory_tensor_has_handle(wholememory_tensor);
wholememory_memory_type_t memory_type = WHOLEMEMORY_MT_NONE;
if (has_handle) {
memory_type =
wholememory_get_memory_type(wholememory_tensor_get_memory_handle(wholememory_tensor));
}
wholememory_matrix_description_t matrix_description;
auto tensor_description = *wholememory_tensor_get_tensor_description(wholememory_tensor);
if (tensor_description.dim != 1 && tensor_description.dim != 2) {
WHOLEMEMORY_ERROR("wholememory_tensor should be 1D or 2D tensor.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description.dim == 1) {
if (!wholememory_unsqueeze_tensor(&tensor_description, 1)) {
WHOLEMEMORY_ERROR("Input 1D wholememory_tensor unsqueeze to 2D failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
}
if (!wholememory_convert_tensor_desc_to_matrix(&matrix_description, &tensor_description)) {
WHOLEMEMORY_ERROR("Input wholememory_tensor convert to matrix failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (wholememory_tensor_get_tensor_description(indices_tensor)->dim != 1) {
WHOLEMEMORY_ERROR("indices tensor should be 1D tensor");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_tensor_description_t output_tensor_desc =
*wholememory_tensor_get_tensor_description(output_tensor);
if (output_tensor_desc.dim != tensor_description.dim) {
WHOLEMEMORY_ERROR("output tensor should be same dim as wholememory_tensor.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (output_tensor_desc.dim == 1) {
if (!wholememory_unsqueeze_tensor(&output_tensor_desc, 1)) {
WHOLEMEMORY_ERROR("Output 1D wholememory_tensor unsqueeze to 2D failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
}
void* indices = wholememory_tensor_get_data_pointer(indices_tensor);
void* output = wholememory_tensor_get_data_pointer(output_tensor);
wholememory_array_description_t indices_desc;
wholememory_matrix_description_t output_desc;
if (!wholememory_convert_tensor_desc_to_array(
&indices_desc, wholememory_tensor_get_tensor_description(indices_tensor))) {
WHOLEMEMORY_ERROR("Convert indices tensor to array failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (!wholememory_convert_tensor_desc_to_matrix(&output_desc, &output_tensor_desc)) {
WHOLEMEMORY_ERROR("Convert output tensor to matrix failed.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (has_handle && memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
return wholememory_ops::wholememory_gather_nccl(
wholememory_tensor_get_memory_handle(wholememory_tensor),
matrix_description,
indices,
indices_desc,
output,
output_desc,
p_env_fns,
static_cast<cudaStream_t>(stream));
}
WHOLEMEMORY_EXPECTS_NOTHROW(!has_handle || memory_type == WHOLEMEMORY_MT_CHUNKED ||
memory_type == WHOLEMEMORY_MT_CONTINUOUS,
"Memory type not supported.");
wholememory_gref_t gref;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(wholememory_tensor, &gref));
return wholememory_ops::wholememory_gather_mapped(gref,
matrix_description,
indices,
indices_desc,
output,
output_desc,
p_env_fns,
static_cast<cudaStream_t>(stream));
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/exchange_ids_nccl_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory_ops/temp_memory_handle.hpp>
#include <wholememory_ops/thrust_allocator.hpp>
namespace wholememory_ops {
/**
* Bucket and exchange ids using collective communication
*
* @param indices : pointer to indices array
 * @param indice_desc : indices array description; storage_offset must be 0 and the index
 * type can be int32 or int64
 * @param host_recv_rank_id_count_ptr : pointer to a host int64_t array that receives the
 * count of ids received from each rank
 * @param host_rank_id_count_ptr : pointer to a host int64_t array that receives the count
 * of ids to send to each rank.
 * @param dev_recv_indices_buffer_handle : temp_memory_handle used to create the buffer for
 * received indices.
 * @param dev_raw_indice_ptr : pointer to a pre-allocated device int64_t array that stores
 * the mapping from the bucketed (sorted-by-rank) order back to the original index positions
 * @param embedding_entry_count_per_rank : number of embedding entries per rank
* @param wm_comm : WholeMemory Communicator
* @param p_thrust_allocator : thrust allocator
* @param p_env_fns : EnvFns
* @param stream : CUDA stream to use.
* @return : WHOLEMEMORY_SUCCESS on success, others on failure
*/
wholememory_error_code_t bucket_and_exchange_ids_func(
void* indices,
wholememory_array_description_t indice_desc,
int64_t* host_recv_rank_id_count_ptr,
int64_t* host_rank_id_count_ptr,
temp_memory_handle* dev_recv_indices_buffer_handle,
int64_t* dev_raw_indice_ptr,
size_t embedding_entry_count_per_rank,
wholememory_comm_t wm_comm,
wm_thrust_allocator* p_thrust_allocator,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
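/*
 * Illustrative call sketch (mirrors the usage in the gather/scatter NCCL ops; names like
 * host_send_count and dev_recv_indices are placeholders supplied by the caller):
 *
 *   temp_memory_handle dev_recv_indices(p_env_fns);
 *   WHOLEMEMORY_RETURN_ON_FAIL(bucket_and_exchange_ids_func(indices, indice_desc,
 *     host_recv_count, host_send_count, &dev_recv_indices, dev_raw_indice,
 *     entries_per_rank, wm_comm, &allocator, p_env_fns, stream));
 *   int64_t total_recv = 0;
 *   for (int r = 0; r < world_size; r++) total_recv += host_recv_count[r];
 *
 * Afterwards host_send_count[r] holds how many local indices belong to rank r,
 * host_recv_count[r] holds how many indices were received from rank r, and dev_raw_indice
 * maps the bucketed (sorted-by-rank) order back to the caller's original index positions.
 */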
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_func_impl_integer_data_int64_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename EmbeddingT, typename OutputT>
void gather_integer_int64_temp_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
int64_t indice_count,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
gather_temp_func<EmbeddingT, int64_t, OutputT>(
embedding_gref, embedding_desc, indices, indice_count, output, output_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(GatherFuncIntegerInt64,
gather_integer_int64_temp_func,
ALLSINT,
ALLSINT)
wholememory_error_code_t gather_integer_int64_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(output_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT64);
DISPATCH_TWO_TYPES(
embedding_desc.dtype,
output_desc.dtype,
GatherFuncIntegerInt64,
embedding_gref,
embedding_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
output,
output_desc,
stream);
  } catch (const wholememory::cuda_error& wce) {
    WHOLEMEMORY_ERROR("gather CUDA Error %s\n", wce.what());
    return WHOLEMEMORY_CUDA_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("gather LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/scatter_func_impl_integer_data_int32_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename InputT, typename EmbeddingT>
void scatter_integer_int32_temp_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
scatter_temp_func<InputT, int32_t, EmbeddingT>(
input, input_desc, indices, indice_count, embedding_gref, embedding_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(ScatterFuncIntegerInt32,
scatter_integer_int32_temp_func,
ALLSINT,
ALLSINT)
wholememory_error_code_t scatter_integer_int32_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(input_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT);
DISPATCH_TWO_TYPES(
input_desc.dtype,
embedding_desc.dtype,
ScatterFuncIntegerInt32,
input,
input_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
embedding_gref,
embedding_desc,
stream);
  } catch (const wholememory::cuda_error& wce) {
    WHOLEMEMORY_ERROR("scatter CUDA Error %s\n", wce.what());
    return WHOLEMEMORY_CUDA_ERROR;
  } catch (const wholememory::logic_error& le) {
    WHOLEMEMORY_ERROR("scatter LOGIC Error %s\n", le.what());
    return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_scatter_func.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <wholememory/device_reference.cuh>
#include <wholememory/global_reference.h>
#include <wholememory/tensor_description.h>
#include "cuda_macros.hpp"
#include "error.hpp"
#include "wholememory/integer_utils.hpp"
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
namespace wholememory_ops {
template <typename DataTypeT>
__device__ __forceinline__ void mov_typed_data(DataTypeT* to, const DataTypeT* from)
{
*to = *from;
}
template <int DATA_SIZE>
__device__ __forceinline__ void mov_data(void* to, const void* from)
{
char* ptr_to = static_cast<char*>(to);
const char* ptr_from = static_cast<const char*>(from);
for (int i = 0; i < DATA_SIZE; i++) {
ptr_to[i] = ptr_from[i];
}
}
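// typed_data_vector<DataTypeT, DATA_SIZE> packs DATA_SIZE elements for one thread; the
// specializations below map common (type, count) pairs onto native CUDA vector types of up
// to 16 bytes, so an aligned group of elements can be moved with a single vectorized
// load/store. typed_data_vector_at() further below provides element-wise access into the
// packed storage.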
template <typename DataTypeT, int DATA_SIZE>
struct typed_data_vector {
DataTypeT data[DATA_SIZE];
};
template <>
struct typed_data_vector<double, 2> {
double2 data;
};
template <>
struct typed_data_vector<int64_t, 2> {
int4 data;
};
template <>
struct typed_data_vector<float, 2> {
float2 data;
};
template <>
struct typed_data_vector<float, 4> {
float4 data;
};
template <>
struct typed_data_vector<int, 2> {
int2 data;
};
template <>
struct typed_data_vector<int, 4> {
int4 data;
};
template <>
struct typed_data_vector<__half, 2> {
__half2 data;
};
template <>
struct typed_data_vector<__half, 4> {
int2 data;
};
template <>
struct typed_data_vector<__half, 8> {
int4 data;
};
template <>
struct typed_data_vector<int16_t, 2> {
int data;
};
template <>
struct typed_data_vector<int16_t, 4> {
int2 data;
};
template <>
struct typed_data_vector<int16_t, 8> {
int4 data;
};
template <>
struct typed_data_vector<nv_bfloat16, 2> {
nv_bfloat162 data;
};
template <>
struct typed_data_vector<nv_bfloat16, 4> {
int2 data;
};
template <>
struct typed_data_vector<nv_bfloat16, 8> {
int4 data;
};
template <>
struct typed_data_vector<int8_t, 2> {
int16_t data;
};
template <>
struct typed_data_vector<int8_t, 4> {
int data;
};
template <>
struct typed_data_vector<int8_t, 8> {
int2 data;
};
template <>
struct typed_data_vector<int8_t, 16> {
int4 data;
};
template <typename DataTypeT, int DATA_SIZE>
__device__ __forceinline__ DataTypeT& typed_data_vector_at(
typed_data_vector<DataTypeT, DATA_SIZE>& v, int idx)
{
return ((DataTypeT*)(&v.data))[idx];
}
template <>
__device__ __forceinline__ void mov_data<1>(void* to, const void* from)
{
mov_typed_data(static_cast<int8_t*>(to), static_cast<const int8_t*>(from));
}
template <>
__device__ __forceinline__ void mov_data<2>(void* to, const void* from)
{
mov_typed_data(static_cast<int16_t*>(to), static_cast<const int16_t*>(from));
}
template <>
__device__ __forceinline__ void mov_data<4>(void* to, const void* from)
{
mov_typed_data(static_cast<int32_t*>(to), static_cast<const int32_t*>(from));
}
template <>
__device__ __forceinline__ void mov_data<8>(void* to, const void* from)
{
mov_typed_data(static_cast<int64_t*>(to), static_cast<const int64_t*>(from));
}
template <>
__device__ __forceinline__ void mov_data<16>(void* to, const void* from)
{
mov_typed_data(static_cast<int4*>(to), static_cast<const int4*>(from));
}
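// type_caster maps each storage type to the type used for intermediate arithmetic:
// __half and __nv_bfloat16 are widened to float on load and narrowed back on store, while
// all other types pass through unchanged. convert_type<FromT, ToT>() chains the two casters
// to convert between any supported pair of element types.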
template <typename DataTypeT>
class type_caster {
public:
using LoadTypeT = DataTypeT;
using StoreTypeT = DataTypeT;
static __device__ __forceinline__ LoadTypeT convert_load_data(DataTypeT data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ __forceinline__ DataTypeT convert_store_data(StoreTypeT data)
{
return static_cast<DataTypeT>(data);
}
};
template <>
class type_caster<__half> {
public:
using LoadTypeT = float;
using StoreTypeT = float;
static __device__ __forceinline__ LoadTypeT convert_load_data(__half data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ __forceinline__ __half convert_store_data(StoreTypeT data)
{
return static_cast<__half>(data);
}
};
template <>
class type_caster<__nv_bfloat16> {
public:
using LoadTypeT = float;
using StoreTypeT = float;
static __device__ LoadTypeT convert_load_data(__nv_bfloat16 data)
{
return static_cast<LoadTypeT>(data);
}
static __device__ __nv_bfloat16 convert_store_data(StoreTypeT data)
{
return static_cast<__nv_bfloat16>(data);
}
};
template <typename FromT, typename ToT>
__device__ __forceinline__ ToT convert_type(FromT from)
{
return type_caster<ToT>::convert_store_data(type_caster<FromT>::convert_load_data(from));
}
/**
* Determine alignment of a WholeMemory matrix, in element count, maximum 16 / element_size.
* @param embedding_desc : wholememory_matrix_description_t matrix description.
* @return : Alignment that can be used, in element count.
*/
inline int determine_wholememory_alignment_elt_count(
wholememory_matrix_description_t embedding_desc)
{
int elt_size = static_cast<int>(wholememory_dtype_get_element_size(embedding_desc.dtype));
WHOLEMEMORY_CHECK(elt_size != -1);
int alignment = 16 / elt_size;
for (; alignment > 1; alignment /= 2) {
if (embedding_desc.storage_offset % alignment == 0 &&
embedding_desc.sizes[1] % alignment == 0 && embedding_desc.stride % alignment == 0)
break;
}
return alignment;
}
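// Example: for a float matrix (elt_size = 4) with storage_offset = 0, sizes[1] = 128 and
// stride = 128, the loop starts at alignment = 16 / 4 = 4 and all three divisibility checks
// pass, so 16-byte (float4) accesses are possible. With storage_offset = 2 the check for 4
// fails and the result drops to 2.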
/**
* Determine alignment of normal memory, in element count, maximum 16 / element_size.
* @param ptr : pointer to the memory.
* @param memory_desc : wholememory_matrix_description_t matrix description.
* @return : Alignment that can be used, in element count.
*/
inline int determine_memory_alignment_elt_count(const void* ptr,
wholememory_matrix_description_t memory_desc)
{
int elt_size = static_cast<int>(wholememory_dtype_get_element_size(memory_desc.dtype));
WHOLEMEMORY_CHECK(elt_size != -1);
int alignment = 16 / elt_size;
int64_t int_ptr = reinterpret_cast<int64_t>(ptr);
WHOLEMEMORY_CHECK(int_ptr % elt_size == 0);
int_ptr /= elt_size;
int_ptr += memory_desc.storage_offset;
for (; alignment > 1; alignment /= 2) {
if (int_ptr % alignment == 0 && memory_desc.sizes[1] % alignment == 0 &&
memory_desc.stride % alignment == 0)
break;
}
return alignment;
}
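// Example: in addition to the checks above, the raw pointer itself must be aligned. A float
// output whose address is 16-byte aligned (with storage_offset = 0, sizes[1] = 128,
// stride = 128) yields 4; if the address is only 8-byte aligned, the result drops to 2.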
template <typename EmbeddingT, typename IndexT, typename OutputT, int ALIGNMENT = 1>
__global__ void gather_func_kernel(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
const IndexT* indices,
int64_t indice_count,
OutputT* output,
wholememory_matrix_description_t output_desc)
{
auto block = cooperative_groups::this_thread_block();
auto mywarp = cooperative_groups::tiled_partition<32>(block);
__shared__ char shm_in_char[16384];
OutputT* all_sh = reinterpret_cast<OutputT*>(shm_in_char);
OutputT* my_shared;
int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) / 32;
int lane_id = threadIdx.x % 32;
int embedding_size = embedding_desc.sizes[1];
int64_t embedding_stride = embedding_desc.stride;
int64_t output_stride = output_desc.stride;
int shm_size = 16384 / sizeof(OutputT);
wholememory::device_reference<EmbeddingT> embedding_dev_ref(embedding_gref);
typed_data_vector<EmbeddingT, ALIGNMENT> embeddings;
typed_data_vector<OutputT, ALIGNMENT> outputs;
bool use_shm = true;
  if (shm_size / (blockDim.x / 32) < output_desc.sizes[1]) {
    // the output row does not fit in this warp's shared-memory slice, write directly to global memory
use_shm = false;
} else {
my_shared = all_sh + shm_size / (blockDim.x / 32) * (threadIdx.x / 32);
}
for (int64_t output_idx = warp_id; output_idx < indice_count;
output_idx += gridDim.x * (blockDim.x / 32)) {
OutputT* output_ptr = output + output_desc.storage_offset + output_stride * output_idx;
if (!use_shm) { my_shared = output_ptr; }
int64_t embedding_table_idx = indices[output_idx];
if (embedding_table_idx < 0) continue;
EmbeddingT* emb_ptr =
&embedding_dev_ref[embedding_desc.storage_offset + embedding_table_idx * embedding_stride];
for (int emb_idx = lane_id * ALIGNMENT; emb_idx < embedding_size; emb_idx += ALIGNMENT * 32) {
mov_data<sizeof(EmbeddingT) * ALIGNMENT>(&embeddings, emb_ptr + emb_idx);
#pragma unroll
for (int sub_idx = 0; sub_idx < ALIGNMENT; sub_idx++) {
typed_data_vector_at(outputs, sub_idx) =
convert_type<EmbeddingT, OutputT>(typed_data_vector_at(embeddings, sub_idx));
}
mov_data<sizeof(OutputT) * ALIGNMENT>(my_shared + emb_idx, &outputs);
}
if (use_shm) {
int copy_size = output_desc.sizes[1] * sizeof(OutputT);
cooperative_groups::memcpy_async(mywarp, output_ptr, my_shared, copy_size);
cooperative_groups::wait(mywarp);
}
}
return;
}
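// Summary of the kernel above: each warp gathers one embedding row per iteration. Lanes move
// ALIGNMENT-element vectors from WholeMemory into registers, convert element types if needed,
// stage the converted row in the warp's slice of the 16 KB shared buffer when it fits, and
// flush it to the output row with warp-cooperative memcpy_async. Rows whose index is negative
// are skipped, leaving the corresponding output row untouched.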
template <typename EmbeddingT, typename IndexT, typename OutputT>
void gather_temp_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
int64_t indice_count,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
WHOLEMEMORY_EXPECTS(output_desc.sizes[0] == indice_count,
"gather_func, output shape[0]=%ld, but indice_count=%ld",
output_desc.sizes[0],
indice_count);
if (indice_count == 0 || embedding_desc.sizes[1] == 0) return;
int wm_alignment = determine_wholememory_alignment_elt_count(embedding_desc);
int mm_alignment = determine_memory_alignment_elt_count(output, output_desc);
int alignment = std::min<int>(wm_alignment, mm_alignment);
// int embedding_size = embedding_desc.sizes[1];
// int thread_num = wholememory::div_rounding_up_safe<int>(embedding_size, alignment);
// thread_num = std::min(thread_num, 512);
// int64_t block_count = indice_count >= 1024 ? 1024 : static_cast<int>(indice_count);
void (*kernel_fn)(wholememory_gref_t,
wholememory_matrix_description_t,
const IndexT*,
int64_t,
OutputT*,
wholememory_matrix_description_t) = nullptr;
switch (alignment) {
case 16: {
kernel_fn = gather_func_kernel<EmbeddingT, IndexT, OutputT, 16>;
break;
}
case 8: {
kernel_fn = gather_func_kernel<EmbeddingT, IndexT, OutputT, 8>;
break;
}
case 4: {
kernel_fn = gather_func_kernel<EmbeddingT, IndexT, OutputT, 4>;
break;
}
case 2: {
kernel_fn = gather_func_kernel<EmbeddingT, IndexT, OutputT, 2>;
break;
}
case 1: {
kernel_fn = gather_func_kernel<EmbeddingT, IndexT, OutputT, 1>;
break;
}
default: {
WHOLEMEMORY_FAIL("gather func alignment=%d.", alignment);
return;
}
}
int block_size = 1024;
int block_count = indice_count > 1568 ? 1568 : indice_count;
kernel_fn<<<block_count, block_size, 0, stream>>>(embedding_gref,
embedding_desc,
static_cast<const IndexT*>(indices),
indice_count,
static_cast<OutputT*>(output),
output_desc);
WM_CUDA_CHECK(cudaGetLastError());
}
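// Example (illustrative): float embeddings with embedding_dim = 128, stride = 128 and a
// 16-byte-aligned float output give wm_alignment = mm_alignment = 4, so with int64_t indices
// gather_func_kernel<float, int64_t, float, 4> is launched using 1024 threads per block
// (32 warps, one gathered row per warp per iteration) and at most 1568 blocks.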
template <typename InputT, typename IndexT, typename EmbeddingT, int ALIGNMENT = 1>
__global__ void scatter_func_kernel(const InputT* input,
wholememory_matrix_description_t input_desc,
const IndexT* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc)
{
auto block = cooperative_groups::this_thread_block();
auto mywarp = cooperative_groups::tiled_partition<32>(block);
__shared__ char shm_in_char[24576];
InputT* all_sh = reinterpret_cast<InputT*>(shm_in_char);
InputT* my_shared;
int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) / 32;
int lane_id = threadIdx.x % 32;
int embedding_size = embedding_desc.sizes[1];
int64_t embedding_stride = embedding_desc.stride;
int64_t input_stride = input_desc.stride;
int async_copy_align = sizeof(InputT) > 4 ? 1 : 4 / sizeof(InputT);
int shm_size = 24576 / sizeof(InputT);
int batch_size = (shm_size / (blockDim.x / 32) - async_copy_align) /
input_stride; // indices batch size in lines
wholememory::device_reference<EmbeddingT> embedding_dev_ref(embedding_gref);
typed_data_vector<EmbeddingT, ALIGNMENT> embeddings;
typed_data_vector<InputT, ALIGNMENT> inputs;
  int input_off_tail =
    input_desc.storage_offset %
    async_copy_align;  // crucial for copy alignment: keeps the async-copy source 4-byte aligned
bool use_shm = true;
if (batch_size <= 0) {
use_shm = false;
batch_size = 1;
} else {
my_shared = all_sh + shm_size / (blockDim.x / 32) * (threadIdx.x / 32);
}
for (int64_t input_idx = warp_id * batch_size; input_idx < indice_count;
input_idx += gridDim.x * (blockDim.x / 32) * batch_size) {
int cur_idx_lines =
(indice_count - input_idx) > batch_size ? batch_size : indice_count - input_idx;
const InputT* input_ptr =
input + input_desc.storage_offset - input_off_tail + input_stride * input_idx;
    // input_ptr is rewound by input_off_tail so the async-copy source stays aligned
if (use_shm) {
int copy_size = input_off_tail + cur_idx_lines * input_stride;
if (input_idx + cur_idx_lines < indice_count) // input_dim * sizeof(InputT) > 4 is needed
copy_size = (copy_size + async_copy_align - 1) / async_copy_align * async_copy_align;
copy_size *= sizeof(InputT);
cooperative_groups::memcpy_async(mywarp, my_shared, input_ptr, copy_size);
cooperative_groups::wait(mywarp);
}
for (int e = 0; e < cur_idx_lines; e++) {
int64_t embedding_table_idx = indices[input_idx + e];
if (embedding_table_idx < 0) continue;
EmbeddingT* emb_ptr =
&embedding_dev_ref[embedding_desc.storage_offset + embedding_table_idx * embedding_stride];
for (int emb_idx = lane_id * ALIGNMENT; emb_idx < embedding_size; emb_idx += ALIGNMENT * 32) {
if (use_shm)
mov_data<sizeof(InputT) * ALIGNMENT>(
&inputs, my_shared + input_off_tail + e * input_stride + emb_idx);
else
mov_data<sizeof(InputT) * ALIGNMENT>(
&inputs, input_ptr + input_off_tail + e * input_stride + emb_idx);
#pragma unroll
for (int sub_idx = 0; sub_idx < ALIGNMENT; sub_idx++) {
typed_data_vector_at(embeddings, sub_idx) =
convert_type<InputT, EmbeddingT>(typed_data_vector_at(inputs, sub_idx));
}
mov_data<sizeof(EmbeddingT) * ALIGNMENT>(emb_ptr + emb_idx, &embeddings);
}
}
mywarp.sync();
}
return;
}
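// Summary of the kernel above: each warp stages a small batch of input rows in shared memory
// and scatters them into WholeMemory. With 256 threads per block (8 warps) and float inputs,
// the 24 KB buffer gives 24576 / 4 / 8 = 768 floats per warp; after reserving async_copy_align
// (1 element for 4-byte types) the batch is (768 - 1) / input_stride rows, e.g. 5 rows when
// input_stride = 128. Batches are loaded with memcpy_async, converted in ALIGNMENT-wide
// vectors, and written to the embedding rows selected by indices; negative indices are skipped.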
template <typename InputT, typename IndexT, typename EmbeddingT>
void scatter_temp_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
WHOLEMEMORY_EXPECTS(input_desc.sizes[0] == indice_count,
"scatter_func, input shape[0]=%ld, but indice_count=%ld",
input_desc.sizes[0],
indice_count);
if (indice_count == 0 || embedding_desc.sizes[1] == 0) return;
int wm_alignment = determine_wholememory_alignment_elt_count(embedding_desc);
int mm_alignment = determine_memory_alignment_elt_count(input, input_desc);
int alignment = std::min<int>(wm_alignment, mm_alignment);
void (*kernel_fn)(const InputT*,
wholememory_matrix_description_t,
const IndexT*,
int64_t,
wholememory_gref_t,
wholememory_matrix_description_t) = nullptr;
switch (alignment) {
case 16: {
kernel_fn = scatter_func_kernel<InputT, IndexT, EmbeddingT, 16>;
break;
}
case 8: {
kernel_fn = scatter_func_kernel<InputT, IndexT, EmbeddingT, 8>;
break;
}
case 4: {
kernel_fn = scatter_func_kernel<InputT, IndexT, EmbeddingT, 4>;
break;
}
case 2: {
kernel_fn = scatter_func_kernel<InputT, IndexT, EmbeddingT, 2>;
break;
}
case 1: {
kernel_fn = scatter_func_kernel<InputT, IndexT, EmbeddingT, 1>;
break;
}
default: {
WHOLEMEMORY_FAIL("scatter func alignment=%d.", alignment);
return;
}
}
int block_size = 256;
int block_count = indice_count > 1568 ? 1568 : indice_count;
kernel_fn<<<block_count, block_size, 0, stream>>>(static_cast<const InputT*>(input),
input_desc,
static_cast<const IndexT*>(indices),
indice_count,
embedding_gref,
embedding_desc);
WM_CUDA_CHECK(cudaGetLastError());
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/embedding_optimizer_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "embedding_optimizer_func.h"
#include "cuda_macros.hpp"
#include "error.hpp"
#include "logger.hpp"
#include "wholememory/integer_utils.hpp"
#include "wholememory_ops/functions/embedding_cache_func.cuh"
#include "wholememory_ops/register.hpp"
#include <wholememory/device_reference.cuh>
namespace wholememory_ops {
__global__ void set_float_kernel(float* data_ptr, float value, size_t elt_count)
{
int64_t idx = blockIdx.x;
idx *= blockDim.x;
idx += threadIdx.x;
if (idx >= elt_count) return;
data_ptr[idx] = value;
}
void set_memory_to_float_value(float* data_ptr, float value, size_t elt_count, cudaStream_t stream)
{
const int thread_count = 128;
int block_count = wholememory::div_rounding_up_safe<int64_t>(elt_count, thread_count);
set_float_kernel<<<block_count, thread_count, 0, stream>>>(data_ptr, value, elt_count);
WM_CUDA_CHECK_NO_THROW(cudaGetLastError());
}
template <int PerElementCount = 0>
static void check_optimizer_inputs(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
int cache_set_coverage)
{
WHOLEMEMORY_CHECK_NOTHROW(indices != nullptr && grads != nullptr && local_embedding != nullptr);
if (cache_set_coverage > 0) {
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_tag != nullptr &&
local_embedding_cache_data != nullptr);
}
if (PerElementCount > 0) {
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_state != nullptr);
if (cache_set_coverage > 0) {
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_tag != nullptr &&
per_element_local_cache_data != nullptr);
}
}
auto* indices_desc = wholememory_tensor_get_tensor_description(indices);
WHOLEMEMORY_CHECK_NOTHROW(indices_desc->dim == 1);
WHOLEMEMORY_CHECK_NOTHROW(indices_desc->dtype == WHOLEMEMORY_DT_INT ||
indices_desc->dtype == WHOLEMEMORY_DT_INT64);
WHOLEMEMORY_CHECK_NOTHROW(indices_desc->storage_offset == 0);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
WHOLEMEMORY_CHECK_NOTHROW(grads_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(grads_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(grads_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(grads_desc->sizes[0] == indices_desc->sizes[0]);
int embedding_dim = grads_desc->sizes[1];
auto* local_embedding_desc = wholememory_tensor_get_tensor_description(local_embedding);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_desc->storage_offset == 0);
int padded_embedding_dim = local_embedding_desc->strides[0];
int64_t local_embedding_entry_count = local_embedding_desc->sizes[0];
// WHOLEMEMORY_CHECK_NOTHROW(embedding_dim <= padded_embedding_dim);
// if (wholememory::round_up_unsafe<int>(embedding_dim, 4) != padded_embedding_dim) {
// WHOLEMEMORY_FAIL_NOTHROW("embedding_dim=%d, but padded_embedding_dim=%d", embedding_dim,
// padded_embedding_dim);
// }
WHOLEMEMORY_CHECK_NOTHROW(wholememory::round_up_unsafe<int>(embedding_dim, 4) ==
padded_embedding_dim);
if (cache_set_coverage > 0) {
auto* local_embedding_cache_tag_desc =
wholememory_tensor_get_tensor_description(local_embedding_cache_tag);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_tag_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_tag_desc->dtype == WHOLEMEMORY_DT_INT16);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_tag_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_tag_desc->sizes[1] == 32);
int64_t local_cache_set_count = local_embedding_cache_tag_desc->sizes[0];
WHOLEMEMORY_CHECK_NOTHROW(local_cache_set_count * cache_set_coverage >=
local_embedding_entry_count);
auto* local_embedding_cache_data_desc =
wholememory_tensor_get_tensor_description(local_embedding_cache_data);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_data_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_data_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_data_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_data_desc->strides[0] == padded_embedding_dim);
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_cache_data_desc->sizes[0] ==
local_cache_set_count * 32);
}
if (PerElementCount > 0) {
auto* local_per_element_desc =
wholememory_tensor_get_tensor_description(per_element_local_state);
WHOLEMEMORY_CHECK_NOTHROW(local_per_element_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(local_per_element_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(local_per_element_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(local_per_element_desc->sizes[0] == local_embedding_entry_count);
WHOLEMEMORY_CHECK_NOTHROW(local_per_element_desc->sizes[1] ==
PerElementCount * (int64_t)padded_embedding_dim);
int64_t local_per_element_dim = local_per_element_desc->sizes[1];
if (cache_set_coverage > 0) {
auto* per_element_local_cache_tag_desc =
wholememory_tensor_get_tensor_description(per_element_local_cache_tag);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_tag_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_tag_desc->dtype == WHOLEMEMORY_DT_INT16);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_tag_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_tag_desc->sizes[1] == 32);
int64_t local_cache_set_count = per_element_local_cache_tag_desc->sizes[0];
WHOLEMEMORY_CHECK_NOTHROW(local_cache_set_count * cache_set_coverage >=
local_embedding_entry_count);
auto* per_element_local_cache_data_desc =
wholememory_tensor_get_tensor_description(per_element_local_cache_data);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_data_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_data_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_data_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_data_desc->sizes[1] ==
local_per_element_dim);
WHOLEMEMORY_CHECK_NOTHROW(per_element_local_cache_data_desc->sizes[0] ==
local_cache_set_count * 32);
}
}
}
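// In short, the checks above pin down the expected layouts: grads is [indice_count,
// embedding_dim] float; local_embedding is [local_entry_count, padded_dim] float with
// padded_dim = round_up(embedding_dim, 4) == strides[0]; cache tag tensors are
// [local_cache_set_count, 32] int16 with local_cache_set_count * cache_set_coverage >=
// local_entry_count; cache data tensors have local_cache_set_count * 32 rows of stride
// padded_dim; and the per-element optimizer state is [local_entry_count,
// PerElementCount * padded_dim] float.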
template <bool UseCache>
static __device__ __forceinline__ float* optimizer_get_ptr_from_cache(float* local_ptr,
uint16_t* local_cache_tag_ptr,
float* local_cache_data_ptr,
int64_t indice_in_local_rank,
int embedding_stride,
int cache_set_coverage)
{
float* non_cached_ptr = local_ptr + indice_in_local_rank * embedding_stride;
if (!UseCache) { return non_cached_ptr; }
int local_cache_set_id = indice_in_local_rank / cache_set_coverage;
int local_id =
indice_in_local_rank - static_cast<int64_t>(local_cache_set_id) * cache_set_coverage;
local_cache_tag_ptr += static_cast<int64_t>(local_cache_set_id) * CacheLineInfo::kCacheSetSize;
CacheLineInfo cache_line_info;
cache_line_info.LoadTag(local_cache_tag_ptr);
int const cached_line_id = cache_line_info.KeyIndexSync(local_id);
if (cached_line_id == -1) { return non_cached_ptr; }
cache_line_info.SetModified(local_id);
if (threadIdx.x == cached_line_id) { local_cache_tag_ptr[threadIdx.x] = cache_line_info.tag_; }
return local_cache_data_ptr +
(static_cast<int64_t>(local_cache_set_id) * CacheLineInfo::kCacheSetSize +
cached_line_id) *
embedding_stride;
}
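// Called by one full warp per embedding row. Example: with cache_set_coverage = 64 and
// indice_in_local_rank = 130, the row belongs to local cache set 2 with local id 2.
// KeyIndexSync() searches the 32-wide tag set (CacheLineInfo::kCacheSetSize); on a hit the
// returned pointer addresses cache line (2 * 32 + hit_lane) * embedding_stride in the cache
// data and the line is marked modified, otherwise the uncached pointer into the raw local
// embedding is returned.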
template <typename IndiceT, bool UseCache>
__global__ void sgd_optimizer_step_kernel(const IndiceT* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
int64_t local_entry_offset,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float lr)
{
int64_t block_idx = blockIdx.x;
auto indice = indices_ptr[block_idx];
grads_ptr += block_idx * grad_stride;
IndiceT local_rank_indice = indice - local_entry_offset;
__shared__ float* s_embedding_ptr;
float* embedding_ptr;
if (threadIdx.x < 32) {
embedding_ptr = optimizer_get_ptr_from_cache<UseCache>(local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
local_rank_indice,
local_embedding_stride,
cache_set_coverage);
if (threadIdx.x == 0) { s_embedding_ptr = embedding_ptr; }
}
__syncthreads();
embedding_ptr = s_embedding_ptr;
int loop_start_idx = 0;
for (; loop_start_idx < embedding_dim; loop_start_idx += blockDim.x) {
    int local_dim_idx = threadIdx.x;
    int embedding_idx = local_dim_idx + loop_start_idx;
    // guard the padded tail so elements beyond embedding_dim are never read or written
    if (embedding_idx >= embedding_dim) continue;
    float grad_value = grads_ptr[embedding_idx];
    float embedding_value = embedding_ptr[embedding_idx];
grad_value += weight_decay * embedding_value;
embedding_value -= lr * grad_value;
embedding_ptr[embedding_idx] = embedding_value;
}
}
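// Per element, the kernel above applies plain SGD with L2 regularization:
//   g' = g + weight_decay * w,  w = w - lr * g'.
// One block handles one index; threads cover the embedding columns.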
template <typename IndiceT>
void sgd_optimizer_step_temp_func(const void* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
int64_t local_entry_offset,
int indice_count,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float lr,
cudaStream_t stream)
{
const IndiceT* typed_indices_ptr = static_cast<const IndiceT*>(indices_ptr);
int block_count = indice_count;
if (block_count == 0) return;
int thread_count = wholememory::div_rounding_up_unsafe(embedding_dim, 4);
if (thread_count > 512) thread_count = 512;
if (thread_count < 32) thread_count = 32;
auto func_ptr = sgd_optimizer_step_kernel<IndiceT, false>;
if (cache_set_coverage > 0) { func_ptr = sgd_optimizer_step_kernel<IndiceT, true>; }
func_ptr<<<block_count, thread_count, 0, stream>>>(typed_indices_ptr,
grads_ptr,
local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
local_entry_offset,
embedding_dim,
grad_stride,
local_embedding_stride,
cache_set_coverage,
weight_decay,
lr);
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(SGDOptimizerStepTempFunc, sgd_optimizer_step_temp_func, SINT3264)
wholememory_error_code_t sgd_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float lr,
cudaStream_t stream)
{
try {
check_optimizer_inputs<0>(indices,
grads,
local_embedding,
local_embedding_cache_tag,
local_embedding_cache_data,
nullptr,
nullptr,
nullptr,
cache_set_coverage);
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
auto* local_embedding_desc = wholememory_tensor_get_tensor_description(local_embedding);
uint16_t* local_embedding_cache_tag_pr = nullptr;
float* local_embedding_cache_data_ptr = nullptr;
if (cache_set_coverage > 0) {
local_embedding_cache_tag_pr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(local_embedding_cache_tag));
local_embedding_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding_cache_data));
}
DISPATCH_ONE_TYPE(indice_desc->dtype,
SGDOptimizerStepTempFunc,
wholememory_tensor_get_data_pointer(indices),
static_cast<float*>(wholememory_tensor_get_data_pointer(grads)),
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding)),
local_embedding_cache_tag_pr,
local_embedding_cache_data_ptr,
local_entry_offset,
indice_desc->sizes[0],
grads_desc->sizes[1],
grads_desc->strides[0],
local_embedding_desc->strides[0],
cache_set_coverage,
weight_decay,
lr,
stream);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("%s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("File %s, line %d, Unknown error", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
template <typename IndiceT, bool UseCache, bool AdamW = false>
__global__ void lazy_adam_optimizer_step_kernel(const IndiceT* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
float* per_embedding_state_local_ptr,
int64_t local_entry_offset,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float beta1,
float beta2,
float lr)
{
int64_t block_idx = blockIdx.x;
auto indice = indices_ptr[block_idx];
grads_ptr += block_idx * grad_stride;
IndiceT local_rank_indice = indice - local_entry_offset;
per_embedding_state_local_ptr += static_cast<int64_t>(local_rank_indice) * 2;
__shared__ float *s_embedding_ptr, *s_per_element_ptr;
float *embedding_ptr, *per_element_ptr;
if (threadIdx.x < 32) {
embedding_ptr = optimizer_get_ptr_from_cache<UseCache>(local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
local_rank_indice,
local_embedding_stride,
cache_set_coverage);
per_element_ptr = optimizer_get_ptr_from_cache<UseCache>(per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_rank_indice,
local_embedding_stride * 2,
cache_set_coverage);
if (threadIdx.x == 0) {
s_embedding_ptr = embedding_ptr;
s_per_element_ptr = per_element_ptr;
}
}
__syncthreads();
embedding_ptr = s_embedding_ptr;
per_element_ptr = s_per_element_ptr;
float* m_ptr = per_element_ptr;
float* v_ptr = per_element_ptr + local_embedding_stride;
float beta1t = per_embedding_state_local_ptr[0];
float beta2t = per_embedding_state_local_ptr[1];
beta1t *= beta1;
beta2t *= beta2;
int loop_start_idx = 0;
for (; loop_start_idx < embedding_dim; loop_start_idx += blockDim.x) {
    int local_dim_idx = threadIdx.x;
    int embedding_idx = local_dim_idx + loop_start_idx;
    // guard the padded tail so elements beyond embedding_dim are never read or written
    if (embedding_idx >= embedding_dim) continue;
    float grad_value = grads_ptr[embedding_idx];
    float embedding_value = embedding_ptr[embedding_idx];
if (AdamW) {
embedding_value -= lr * weight_decay * embedding_value;
} else {
grad_value = grad_value + weight_decay * embedding_value;
}
float m = m_ptr[embedding_idx];
float v = v_ptr[embedding_idx];
m = beta1 * m + (1 - beta1) * grad_value;
v = beta2 * v + (1 - beta2) * grad_value * grad_value;
float mhat = m / (1 - beta1t);
float vhat = v / (1 - beta2t);
embedding_value = embedding_value - lr * mhat / (sqrtf(vhat) + epsilon);
m_ptr[embedding_idx] = m;
v_ptr[embedding_idx] = v;
embedding_ptr[embedding_idx] = embedding_value;
}
if (threadIdx.x == 0) {
per_embedding_state_local_ptr[0] = beta1t;
per_embedding_state_local_ptr[1] = beta2t;
}
}
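// Per touched row, the kernel above applies (lazy) Adam with per-row step state:
//   m = beta1 * m + (1 - beta1) * g',   v = beta2 * v + (1 - beta2) * g'^2,
//   m_hat = m / (1 - beta1^t),          v_hat = v / (1 - beta2^t),
//   w = w - lr * m_hat / (sqrt(v_hat) + epsilon),
// where g' = g + weight_decay * w for plain Adam, while AdamW instead applies decoupled decay
// w = w - lr * weight_decay * w before the moment update. beta1^t and beta2^t are stored per
// embedding row (per_embedding_state), so a row only advances when it receives gradients.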
template <typename IndiceT>
void lazy_adam_optimizer_step_temp_func(const void* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
float* per_embedding_state_local_ptr,
int64_t local_entry_offset,
int indice_count,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float beta1,
float beta2,
bool adam_w,
float lr,
cudaStream_t stream)
{
const IndiceT* typed_indices_ptr = static_cast<const IndiceT*>(indices_ptr);
int block_count = indice_count;
if (block_count == 0) return;
int thread_count = wholememory::div_rounding_up_unsafe(embedding_dim, 4);
if (thread_count > 512) thread_count = 512;
if (thread_count < 32) thread_count = 32;
auto func_ptr = lazy_adam_optimizer_step_kernel<IndiceT, false>;
if (cache_set_coverage > 0) {
if (adam_w == false) {
func_ptr = lazy_adam_optimizer_step_kernel<IndiceT, true, false>;
} else {
func_ptr = lazy_adam_optimizer_step_kernel<IndiceT, true, true>;
}
} else {
if (adam_w == false) {
func_ptr = lazy_adam_optimizer_step_kernel<IndiceT, false, false>;
} else {
func_ptr = lazy_adam_optimizer_step_kernel<IndiceT, false, true>;
}
}
func_ptr<<<block_count, thread_count, 0, stream>>>(typed_indices_ptr,
grads_ptr,
local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
per_embedding_state_local_ptr,
local_entry_offset,
embedding_dim,
grad_stride,
local_embedding_stride,
cache_set_coverage,
weight_decay,
epsilon,
beta1,
beta2,
lr);
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(LazyAdamOptimizerStepTempFunc,
lazy_adam_optimizer_step_temp_func,
SINT3264)
wholememory_error_code_t lazy_adam_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
wholememory_tensor_t per_embedding_local_state,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float beta1,
float beta2,
bool adam_w,
float lr,
cudaStream_t stream)
{
try {
check_optimizer_inputs<2>(indices,
grads,
local_embedding,
local_embedding_cache_tag,
local_embedding_cache_data,
per_element_local_state,
per_element_local_cache_tag,
per_element_local_cache_data,
cache_set_coverage);
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
auto* local_embedding_desc = wholememory_tensor_get_tensor_description(local_embedding);
int64_t local_embedding_entry_count = local_embedding_desc->sizes[0];
WHOLEMEMORY_CHECK_NOTHROW(per_embedding_local_state != nullptr);
auto* per_embedding_local_state_desc =
wholememory_tensor_get_tensor_description(per_embedding_local_state);
WHOLEMEMORY_CHECK_NOTHROW(per_embedding_local_state_desc->dim == 2);
WHOLEMEMORY_CHECK_NOTHROW(per_embedding_local_state_desc->dtype == WHOLEMEMORY_DT_FLOAT);
WHOLEMEMORY_CHECK_NOTHROW(per_embedding_local_state_desc->storage_offset == 0);
WHOLEMEMORY_CHECK_NOTHROW(per_embedding_local_state_desc->sizes[1] == 2);
if (local_embedding_entry_count != per_embedding_local_state_desc->sizes[0]) {
WHOLEMEMORY_FAIL_NOTHROW(
"local_embedding_entry_count=%ld, but per_embedding_local_state_desc->sizes[0]=%ld",
local_embedding_entry_count,
per_embedding_local_state_desc->sizes[0]);
}
WHOLEMEMORY_CHECK_NOTHROW(local_embedding_entry_count ==
per_embedding_local_state_desc->sizes[0]);
uint16_t* local_embedding_cache_tag_pr = nullptr;
float* local_embedding_cache_data_ptr = nullptr;
uint16_t* per_element_local_cache_tag_ptr = nullptr;
float* per_element_local_cache_data_ptr = nullptr;
if (cache_set_coverage > 0) {
local_embedding_cache_tag_pr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(local_embedding_cache_tag));
local_embedding_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding_cache_data));
per_element_local_cache_tag_ptr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(per_element_local_cache_tag));
per_element_local_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_cache_data));
}
DISPATCH_ONE_TYPE(
indice_desc->dtype,
LazyAdamOptimizerStepTempFunc,
wholememory_tensor_get_data_pointer(indices),
static_cast<float*>(wholememory_tensor_get_data_pointer(grads)),
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding)),
local_embedding_cache_tag_pr,
local_embedding_cache_data_ptr,
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_state)),
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
static_cast<float*>(wholememory_tensor_get_data_pointer(per_embedding_local_state)),
local_entry_offset,
indice_desc->sizes[0],
grads_desc->sizes[1],
grads_desc->strides[0],
local_embedding_desc->strides[0],
cache_set_coverage,
weight_decay,
epsilon,
beta1,
beta2,
adam_w,
lr,
stream);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("%s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("File %s, line %d, Unknown error", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
template <typename IndiceT, bool UseCache>
__global__ void ada_grad_optimizer_step_kernel(const IndiceT* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
int64_t local_entry_offset,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float lr)
{
int64_t block_idx = blockIdx.x;
auto indice = indices_ptr[block_idx];
grads_ptr += block_idx * grad_stride;
IndiceT local_rank_indice = indice - local_entry_offset;
__shared__ float *s_embedding_ptr, *s_per_element_ptr;
float *embedding_ptr, *per_element_ptr;
if (threadIdx.x < 32) {
embedding_ptr = optimizer_get_ptr_from_cache<UseCache>(local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
local_rank_indice,
local_embedding_stride,
cache_set_coverage);
per_element_ptr = optimizer_get_ptr_from_cache<UseCache>(per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_rank_indice,
local_embedding_stride * 1,
cache_set_coverage);
if (threadIdx.x == 0) {
s_embedding_ptr = embedding_ptr;
s_per_element_ptr = per_element_ptr;
}
}
__syncthreads();
embedding_ptr = s_embedding_ptr;
per_element_ptr = s_per_element_ptr;
float* state_sum_ptr = per_element_ptr;
int loop_start_idx = 0;
for (; loop_start_idx < embedding_dim; loop_start_idx += blockDim.x) {
    int local_dim_idx = threadIdx.x;
    int embedding_idx = local_dim_idx + loop_start_idx;
    // guard the padded tail so elements beyond embedding_dim are never read or written
    if (embedding_idx >= embedding_dim) continue;
    float grad_value = grads_ptr[embedding_idx];
    float embedding_value = embedding_ptr[embedding_idx];
grad_value = grad_value + weight_decay * embedding_value;
float state_sum = state_sum_ptr[embedding_idx];
state_sum = state_sum + grad_value * grad_value;
embedding_value = embedding_value - lr * grad_value / (sqrtf(state_sum) + epsilon);
state_sum_ptr[embedding_idx] = state_sum;
embedding_ptr[embedding_idx] = embedding_value;
}
}
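// Per element, the kernel above applies AdaGrad:
//   g' = g + weight_decay * w,  state_sum += g'^2,
//   w = w - lr * g' / (sqrt(state_sum) + epsilon).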
template <typename IndiceT>
void ada_grad_optimizer_step_temp_func(const void* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
int64_t local_entry_offset,
int indice_count,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float lr,
cudaStream_t stream)
{
const IndiceT* typed_indices_ptr = static_cast<const IndiceT*>(indices_ptr);
int block_count = indice_count;
if (block_count == 0) return;
int thread_count = wholememory::div_rounding_up_unsafe(embedding_dim, 4);
if (thread_count > 512) thread_count = 512;
if (thread_count < 32) thread_count = 32;
auto func_ptr = ada_grad_optimizer_step_kernel<IndiceT, false>;
if (cache_set_coverage > 0) { func_ptr = ada_grad_optimizer_step_kernel<IndiceT, true>; }
func_ptr<<<block_count, thread_count, 0, stream>>>(typed_indices_ptr,
grads_ptr,
local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_entry_offset,
embedding_dim,
grad_stride,
local_embedding_stride,
cache_set_coverage,
weight_decay,
epsilon,
lr);
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(AdaGradOptimizerStepTempFunc,
ada_grad_optimizer_step_temp_func,
SINT3264)
wholememory_error_code_t ada_grad_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float lr,
cudaStream_t stream)
{
try {
check_optimizer_inputs<1>(indices,
grads,
local_embedding,
local_embedding_cache_tag,
local_embedding_cache_data,
per_element_local_state,
per_element_local_cache_tag,
per_element_local_cache_data,
cache_set_coverage);
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
auto* local_embedding_desc = wholememory_tensor_get_tensor_description(local_embedding);
uint16_t* local_embedding_cache_tag_pr = nullptr;
float* local_embedding_cache_data_ptr = nullptr;
uint16_t* per_element_local_cache_tag_ptr = nullptr;
float* per_element_local_cache_data_ptr = nullptr;
if (cache_set_coverage > 0) {
local_embedding_cache_tag_pr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(local_embedding_cache_tag));
local_embedding_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding_cache_data));
per_element_local_cache_tag_ptr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(per_element_local_cache_tag));
per_element_local_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_cache_data));
}
DISPATCH_ONE_TYPE(
indice_desc->dtype,
AdaGradOptimizerStepTempFunc,
wholememory_tensor_get_data_pointer(indices),
static_cast<float*>(wholememory_tensor_get_data_pointer(grads)),
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding)),
local_embedding_cache_tag_pr,
local_embedding_cache_data_ptr,
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_state)),
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_entry_offset,
indice_desc->sizes[0],
grads_desc->sizes[1],
grads_desc->strides[0],
local_embedding_desc->strides[0],
cache_set_coverage,
weight_decay,
epsilon,
lr,
stream);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("%s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("File %s, line %d, Unknown error", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
template <typename IndiceT, bool UseCache>
__global__ void rms_prop_optimizer_step_kernel(const IndiceT* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
int64_t local_entry_offset,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float alpha,
float lr)
{
int64_t block_idx = blockIdx.x;
auto indice = indices_ptr[block_idx];
grads_ptr += block_idx * grad_stride;
IndiceT local_rank_indice = indice - local_entry_offset;
__shared__ float *s_embedding_ptr, *s_per_element_ptr;
float *embedding_ptr, *per_element_ptr;
if (threadIdx.x < 32) {
embedding_ptr = optimizer_get_ptr_from_cache<UseCache>(local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
local_rank_indice,
local_embedding_stride,
cache_set_coverage);
per_element_ptr = optimizer_get_ptr_from_cache<UseCache>(per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_rank_indice,
local_embedding_stride * 1,
cache_set_coverage);
if (threadIdx.x == 0) {
s_embedding_ptr = embedding_ptr;
s_per_element_ptr = per_element_ptr;
}
}
__syncthreads();
embedding_ptr = s_embedding_ptr;
per_element_ptr = s_per_element_ptr;
float* v_ptr = per_element_ptr;
int loop_start_idx = 0;
for (; loop_start_idx < embedding_dim; loop_start_idx += blockDim.x) {
    int local_dim_idx = threadIdx.x;
    int embedding_idx = local_dim_idx + loop_start_idx;
    // guard the padded tail so elements beyond embedding_dim are never read or written
    if (embedding_idx >= embedding_dim) continue;
    float grad_value = grads_ptr[embedding_idx];
    float embedding_value = embedding_ptr[embedding_idx];
grad_value = grad_value + weight_decay * embedding_value;
float v = v_ptr[embedding_idx];
v = alpha * v + (1 - alpha) * grad_value * grad_value;
embedding_value = embedding_value - lr * grad_value / (sqrtf(v) + epsilon);
v_ptr[embedding_idx] = v;
embedding_ptr[embedding_idx] = embedding_value;
}
}
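// Per element, the kernel above applies RMSProp:
//   g' = g + weight_decay * w,  v = alpha * v + (1 - alpha) * g'^2,
//   w = w - lr * g' / (sqrt(v) + epsilon).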
template <typename IndiceT>
void rms_prop_optimizer_step_temp_func(const void* indices_ptr,
const float* grads_ptr,
float* local_embedding_ptr,
uint16_t* local_embedding_cache_tag_ptr,
float* local_embedding_cache_data_ptr,
float* per_element_local_embedding_ptr,
uint16_t* per_element_local_cache_tag_ptr,
float* per_element_local_cache_data_ptr,
int64_t local_entry_offset,
int indice_count,
int embedding_dim,
int grad_stride,
int local_embedding_stride,
int cache_set_coverage,
float weight_decay,
float epsilon,
float alpha,
float lr,
cudaStream_t stream)
{
const IndiceT* typed_indices_ptr = static_cast<const IndiceT*>(indices_ptr);
int block_count = indice_count;
if (block_count == 0) return;
int thread_count = wholememory::div_rounding_up_unsafe(embedding_dim, 4);
if (thread_count > 512) thread_count = 512;
if (thread_count < 32) thread_count = 32;
auto func_ptr = rms_prop_optimizer_step_kernel<IndiceT, false>;
if (cache_set_coverage > 0) { func_ptr = rms_prop_optimizer_step_kernel<IndiceT, true>; }
func_ptr<<<block_count, thread_count, 0, stream>>>(typed_indices_ptr,
grads_ptr,
local_embedding_ptr,
local_embedding_cache_tag_ptr,
local_embedding_cache_data_ptr,
per_element_local_embedding_ptr,
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_entry_offset,
embedding_dim,
grad_stride,
local_embedding_stride,
cache_set_coverage,
weight_decay,
epsilon,
alpha,
lr);
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(RMSPropOptimizerStepTempFunc,
rms_prop_optimizer_step_temp_func,
SINT3264)
wholememory_error_code_t rms_prop_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float alpha,
float lr,
cudaStream_t stream)
{
try {
check_optimizer_inputs<1>(indices,
grads,
local_embedding,
local_embedding_cache_tag,
local_embedding_cache_data,
per_element_local_state,
per_element_local_cache_tag,
per_element_local_cache_data,
cache_set_coverage);
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
auto* local_embedding_desc = wholememory_tensor_get_tensor_description(local_embedding);
uint16_t* local_embedding_cache_tag_pr = nullptr;
float* local_embedding_cache_data_ptr = nullptr;
uint16_t* per_element_local_cache_tag_ptr = nullptr;
float* per_element_local_cache_data_ptr = nullptr;
if (cache_set_coverage > 0) {
local_embedding_cache_tag_pr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(local_embedding_cache_tag));
local_embedding_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding_cache_data));
per_element_local_cache_tag_ptr =
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(per_element_local_cache_tag));
per_element_local_cache_data_ptr =
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_cache_data));
}
DISPATCH_ONE_TYPE(
indice_desc->dtype,
RMSPropOptimizerStepTempFunc,
wholememory_tensor_get_data_pointer(indices),
static_cast<float*>(wholememory_tensor_get_data_pointer(grads)),
static_cast<float*>(wholememory_tensor_get_data_pointer(local_embedding)),
local_embedding_cache_tag_pr,
local_embedding_cache_data_ptr,
static_cast<float*>(wholememory_tensor_get_data_pointer(per_element_local_state)),
per_element_local_cache_tag_ptr,
per_element_local_cache_data_ptr,
local_entry_offset,
indice_desc->sizes[0],
grads_desc->sizes[1],
grads_desc->strides[0],
local_embedding_desc->strides[0],
cache_set_coverage,
weight_decay,
epsilon,
alpha,
lr,
stream);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("%s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("File %s, line %d, Unknown error", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/scatter_func_impl_floating_data_int64_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename InputT, typename EmbeddingT>
void scatter_floating_int64_temp_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
scatter_temp_func<InputT, int64_t, EmbeddingT>(
input, input_desc, indices, indice_count, embedding_gref, embedding_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(ScatterFuncFloatingInt64,
scatter_floating_int64_temp_func,
HALF_FLOAT_DOUBLE,
HALF_FLOAT_DOUBLE)
wholememory_error_code_t scatter_floating_int64_func(
const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(input_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT64);
DISPATCH_TWO_TYPES(
input_desc.dtype,
embedding_desc.dtype,
ScatterFuncFloatingInt64,
input,
input_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
embedding_gref,
embedding_desc,
stream);
  } catch (const wholememory::cuda_error& wce) {
    WHOLEMEMORY_ERROR("scatter CUDA Error %s\n", wce.what());
    fflush(stdout);
    return WHOLEMEMORY_CUDA_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("scatter LOGIC Error %s\n", le.what());
fflush(stdout);
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/scatter_func_impl_floating_data_int32_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename InputT, typename EmbeddingT>
void scatter_floating_int32_temp_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
scatter_temp_func<InputT, int32_t, EmbeddingT>(
input, input_desc, indices, indice_count, embedding_gref, embedding_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(ScatterFuncFloatingInt32,
scatter_floating_int32_temp_func,
HALF_FLOAT_DOUBLE,
HALF_FLOAT_DOUBLE)
wholememory_error_code_t scatter_floating_int32_func(
const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(input_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT);
DISPATCH_TWO_TYPES(
input_desc.dtype,
embedding_desc.dtype,
ScatterFuncFloatingInt32,
input,
input_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
embedding_gref,
embedding_desc,
stream);
  } catch (const wholememory::cuda_error& wce) {
    WHOLEMEMORY_ERROR("scatter CUDA Error %s\n", wce.what());
    return WHOLEMEMORY_CUDA_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("scatter LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/embedding_cache_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
#include "wholememory/embedding_cache.hpp"
namespace wholememory_ops {
/**
 * Directly update the cache in the local rank. The local tensor of wm_raw_memory_embedding is
 * cached by cache_local_data; the cache and the raw embedding must share the same communicator.
 * @param indices : global indices to update; they should all belong to the current rank and may
 * contain duplicated gids. In normal use cases, indices come from an alltoallv result.
* @param indice_desc : tensor description of indices, may be gids after alltoallv.
* @param wm_raw_memory_embedding : the WholeMemory Tensor that is to be cached which stores all
* embeddings.
* @param cache_local_data : embedding_cache_local_data of wm_raw_memory_embedding
* @param cache_set_coverage : cache set coverage
* @param p_env_fns : env fns
* @param stream : cudaStream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t update_cache_direct_same_comm(
void* indices,
wholememory_array_description_t indice_desc,
wholememory_tensor_t wm_raw_memory_embedding,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
/**
 * Update the cache in the local rank. The local tensor of wm_raw_memory_embedding is cached by
 * cache_local_data; the cache and the raw embedding may use different communicators.
 * @param indices : global indices to update; they should all belong to the current rank and may
 * contain duplicated gids. In normal use cases, indices come from an alltoallv result.
* @param indice_desc : tensor description of indices, may be gids after alltoallv.
* @param wm_raw_memory_embedding : the WholeMemory Tensor that is to be cached which stores all
* embeddings.
* @param cache_comm : communicator of cache
* @param embedding_entry_count_per_cache_rank : embedding entries covered by each cache rank
* @param cache_local_data : embedding_cache_local_data of wm_raw_memory_embedding
* @param cache_set_coverage : cache set coverage
* @param p_env_fns : env fns
* @param stream : cudaStream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t update_cache_different_comm(
void* indices,
wholememory_array_description_t indice_desc,
wholememory_tensor_t wm_raw_memory_embedding,
wholememory_comm_t cache_comm,
size_t embedding_entry_count_per_cache_rank,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
wholememory_error_code_t writeback_cache_direct_same_comm(
wholememory_tensor_t wm_raw_memory_embedding,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
bool drop_all,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/embedding_cache_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "embedding_cache_func.h"
#include <cub/cub.cuh>
#include <wholememory/wholememory_op.h>
#include <wholememory/wholememory_tensor.h>
#include "cuda_macros.hpp"
#include "embedding_cache_func.cuh"
#include "error.hpp"
#include "exchange_ids_nccl_func.h"
#include "logger.hpp"
#include "wholememory/embedding_cache.hpp"
#include "wholememory/env_func_ptrs.h"
#include "wholememory/integer_utils.hpp"
#include "wholememory_ops/functions/embedding_cache_func.cuh"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
/**
 * Sort local indices, deduplicate them, and return the unique indices together with the
 * occurrence count of each unique index.
 * @tparam IndexT : data type of indices
 * @param indices : indices to process
 * @param indice_desc : description of indices
 * @param num_runs : returns the number of unique indices
 * @param unique_indices_handle : temp_memory_handle of unique indices
 * @param unique_count_handle : temp_memory_handle of the count of each unique index
 * @param p_thrust_allocator : thrust allocator
 * @param p_env_fns : env_fns
 * @param stream : CUDA stream to use
*/
template <typename IndexT>
void SortUniqueLocalIndicesTempFunc(const void* indices,
wholememory_array_description_t indice_desc,
int* num_runs,
temp_memory_handle* unique_indices_handle,
temp_memory_handle* unique_count_handle,
wm_thrust_allocator* p_thrust_allocator,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
if (indice_desc.size == 0) return;
wm_thrust_allocator& allocator = *p_thrust_allocator;
WHOLEMEMORY_CHECK_NOTHROW(indice_desc.storage_offset == 0);
const IndexT* indices_to_sort = static_cast<const IndexT*>(indices);
temp_memory_handle sorted_indices_handle(p_env_fns);
sorted_indices_handle.device_malloc(indice_desc.size, indice_desc.dtype);
IndexT* sorted_indices = static_cast<IndexT*>(sorted_indices_handle.pointer());
void* cub_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
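  // CUB two-phase pattern: the first call (with a null temp storage pointer) only computes
  // temp_storage_bytes, the second call performs the actual radix sort.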
cub::DeviceRadixSort::SortKeys(cub_temp_storage,
temp_storage_bytes,
indices_to_sort,
sorted_indices,
indice_desc.size,
0,
sizeof(IndexT) * 8,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceRadixSort::SortKeys(cub_temp_storage,
temp_storage_bytes,
indices_to_sort,
sorted_indices,
indice_desc.size,
0,
sizeof(IndexT) * 8,
stream);
unique_indices_handle->device_malloc(indice_desc.size, indice_desc.dtype);
unique_count_handle->device_malloc(indice_desc.size, WHOLEMEMORY_DT_INT);
IndexT* unique_indices = static_cast<IndexT*>(unique_indices_handle->pointer());
int* unique_counts = static_cast<int*>(unique_count_handle->pointer());
temp_memory_handle number_runs_handle(p_env_fns);
number_runs_handle.device_malloc(1, WHOLEMEMORY_DT_INT);
int* number_runs = static_cast<int*>(number_runs_handle.pointer());
cub_temp_storage = nullptr;
temp_storage_bytes = 0;
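  // Same two-phase CUB pattern, this time to run-length encode the sorted indices into
  // (unique index, count) pairs.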
cub::DeviceRunLengthEncode::Encode(cub_temp_storage,
temp_storage_bytes,
sorted_indices,
unique_indices,
unique_counts,
number_runs,
indice_desc.size,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceRunLengthEncode::Encode(cub_temp_storage,
temp_storage_bytes,
sorted_indices,
unique_indices,
unique_counts,
number_runs,
indice_desc.size,
stream);
WM_CUDA_CHECK_NO_THROW(
cudaMemcpyAsync(num_runs, number_runs, sizeof(int), cudaMemcpyDeviceToHost, stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
}
REGISTER_DISPATCH_ONE_TYPE(SortUniqueLocalIndicesTempFunc, SortUniqueLocalIndicesTempFunc, SINT3264)
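// One thread per unique index: subtract the rank's starting gid and divide by cache_set_coverage
// to obtain the local cache set id of that index.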
template <typename IndexT>
__global__ void ComputeCacheSetLocalID(const IndexT* indices,
int* cache_set_lid,
int indices_num_run,
int64_t rank_start_gid,
int cache_set_coverage)
{
int const thread_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_idx >= indices_num_run) return;
int const cache_set_local_id = (indices[thread_idx] - rank_start_gid) / cache_set_coverage;
cache_set_lid[thread_idx] = cache_set_local_id;
}
template <typename IndexT>
void BucketByCacheSetTempFunc(const void* unique_indices,
int indices_num_run,
temp_memory_handle* unique_cache_set_lid_handle,
temp_memory_handle* unique_cache_set_start_handle,
temp_memory_handle* unique_cache_set_count_handle,
int* cache_set_num_run,
int64_t rank_start_gid,
int cache_set_coverage,
wm_thrust_allocator* p_thrust_allocator,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
if (indices_num_run == 0) return;
wm_thrust_allocator& allocator = *p_thrust_allocator;
temp_memory_handle cache_set_lid_handle(p_env_fns);
cache_set_lid_handle.device_malloc(indices_num_run, WHOLEMEMORY_DT_INT);
int* cache_set_lid = static_cast<int*>(cache_set_lid_handle.pointer());
int const block_count = wholememory::div_rounding_up_unsafe(indices_num_run, 32);
ComputeCacheSetLocalID<<<block_count, 32, 0, stream>>>(static_cast<const IndexT*>(unique_indices),
cache_set_lid,
indices_num_run,
rank_start_gid,
cache_set_coverage);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
void* cub_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
temp_memory_handle cache_set_num_run_handle(p_env_fns);
unique_cache_set_lid_handle->device_malloc(indices_num_run, WHOLEMEMORY_DT_INT);
unique_cache_set_count_handle->device_malloc(indices_num_run, WHOLEMEMORY_DT_INT);
unique_cache_set_start_handle->device_malloc(indices_num_run, WHOLEMEMORY_DT_INT);
cache_set_num_run_handle.device_malloc(1, WHOLEMEMORY_DT_INT);
int* unique_cache_set_lid = static_cast<int*>(unique_cache_set_lid_handle->pointer());
int* unique_cache_set_start = static_cast<int*>(unique_cache_set_start_handle->pointer());
int* unique_cache_set_count = static_cast<int*>(unique_cache_set_count_handle->pointer());
int* cache_set_num_run_d = static_cast<int*>(cache_set_num_run_handle.pointer());
cub::DeviceRunLengthEncode::Encode(cub_temp_storage,
temp_storage_bytes,
cache_set_lid,
unique_cache_set_lid,
unique_cache_set_count,
cache_set_num_run_d,
indices_num_run,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceRunLengthEncode::Encode(cub_temp_storage,
temp_storage_bytes,
cache_set_lid,
unique_cache_set_lid,
unique_cache_set_count,
cache_set_num_run_d,
indices_num_run,
stream);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
*cache_set_num_run = 0;
WM_CUDA_CHECK_NO_THROW(cudaMemcpyAsync(
cache_set_num_run, cache_set_num_run_d, sizeof(int), cudaMemcpyDeviceToHost, stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
if (*cache_set_num_run == 0) return;
cub_temp_storage = nullptr;
temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(cub_temp_storage,
temp_storage_bytes,
unique_cache_set_count,
unique_cache_set_start,
*cache_set_num_run,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceScan::ExclusiveSum(cub_temp_storage,
temp_storage_bytes,
unique_cache_set_count,
unique_cache_set_start,
*cache_set_num_run,
stream);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(BucketByCacheSetTempFunc, BucketByCacheSetTempFunc, SINT3264)
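// One 32-thread block handles one cache set (kCacheSetSize == 32): it updates the tag/LFU state
// with CacheSetUpdater, writes rows that must be written back to the local embedding memory, and
// loads the newly selected rows from the local embedding memory into the cache.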
template <typename IndexT>
__global__ void UpdateCacheDirectKernel(const int* unique_cache_set_lid,
const int* unique_cache_set_update_start,
const int* unique_cache_set_update_count,
const IndexT* unique_indices,
const int* unique_indices_count,
uint16_t* local_cache_line_tag,
uint16_t* local_cache_line_lfu_count,
int64_t* local_access_count,
int4* local_cached_data,
int4* local_memory_data,
int embedding_dim_in_int4,
int64_t rank_start_gid,
int cache_set_coverage)
{
static_assert(wholememory::embedding_cache_base::kCacheSetSize == 32);
int64_t const cache_set_lid = unique_cache_set_lid[blockIdx.x];
local_cache_line_tag += cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
local_cache_line_lfu_count += cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
local_access_count += cache_set_lid * cache_set_coverage;
local_cached_data +=
cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize * embedding_dim_in_int4;
local_memory_data += cache_set_lid * cache_set_coverage * embedding_dim_in_int4;
CacheLineInfo cache_line_info;
cache_line_info.LoadInfo(local_cache_line_tag, local_cache_line_lfu_count);
int cache_set_update_start_idx = unique_cache_set_update_start[blockIdx.x];
int cache_set_update_count = unique_cache_set_update_count[blockIdx.x];
using Updater = wholememory_ops::CacheSetUpdater<IndexT>;
Updater updater;
__shared__ typename Updater::TempStorage temp_storage;
__shared__ IndexT s_load_to_cache_ids[CacheSetUpdater<IndexT>::kCacheSetSize];
__shared__ IndexT s_write_back_to_memory_ids[CacheSetUpdater<IndexT>::kCacheSetSize];
s_load_to_cache_ids[threadIdx.x] = -1;
s_write_back_to_memory_ids[threadIdx.x] = -1;
int old_cached_lid = cache_line_info.LocalID();
__syncthreads();
updater.template UpdateCache<true, true>(temp_storage,
cache_line_info,
local_access_count,
unique_indices + cache_set_update_start_idx,
unique_indices_count + cache_set_update_start_idx,
&s_load_to_cache_ids[0],
&s_write_back_to_memory_ids[0],
rank_start_gid + cache_set_coverage * cache_set_lid,
cache_set_update_count);
__syncthreads();
IndexT thread_node_id = s_write_back_to_memory_ids[threadIdx.x];
unsigned int valid_mask = __ballot_sync(0xFFFFFFFF, thread_node_id >= 0);
int need_write_back_count = __popc(valid_mask);
assert(valid_mask == (1ULL << need_write_back_count) - 1);
for (int i = 0; i < need_write_back_count; i++) {
IndexT write_back_gid = s_write_back_to_memory_ids[i];
int local_id = write_back_gid - rank_start_gid - cache_set_coverage * cache_set_lid;
uint32_t mask = __ballot_sync(0xFFFFFFFF, static_cast<int>(old_cached_lid == local_id));
int cache_line_idx = __ffs(mask) - 1;
assert(cache_line_idx >= 0 && cache_line_idx <= 32);
assert(local_id >= 0 && local_id < cache_set_coverage);
for (int idx = threadIdx.x; idx < embedding_dim_in_int4; idx += 32) {
local_memory_data[local_id * embedding_dim_in_int4 + idx] =
local_cached_data[cache_line_idx * embedding_dim_in_int4 + idx];
}
}
thread_node_id = s_load_to_cache_ids[threadIdx.x];
valid_mask = __ballot_sync(0xFFFFFFFF, thread_node_id >= 0);
int need_load_count = __popc(valid_mask);
assert(valid_mask == (1ULL << need_load_count) - 1);
for (int i = 0; i < need_load_count; i++) {
IndexT load_gid = s_load_to_cache_ids[i];
int local_id = load_gid - rank_start_gid - cache_set_coverage * cache_set_lid;
int cache_line_idx = cache_line_info.KeyIndexSync(local_id);
assert(cache_line_idx >= 0 && cache_line_idx <= 32);
assert(local_id >= 0 && local_id < cache_set_coverage);
for (int idx = threadIdx.x; idx < embedding_dim_in_int4; idx += 32) {
local_cached_data[cache_line_idx * embedding_dim_in_int4 + idx] =
local_memory_data[local_id * embedding_dim_in_int4 + idx];
}
}
cache_line_info.StoreInfo(local_cache_line_tag, local_cache_line_lfu_count);
}
template <typename IndexT>
void UpdateCacheDirectTempFunc(const int* unique_cache_set_lid,
const int* unique_cache_set_start,
const int* unique_cache_set_count,
const void* unique_indices,
const int* unique_indices_count,
uint16_t* local_cache_line_tag,
uint16_t* local_cache_line_lfu_count,
int64_t* local_access_count,
int4* local_cache_line_data,
int4* local_memory_data,
int embedding_dim_in_int4,
int cache_set_num_run,
int64_t rank_start_gid,
int cache_set_coverage,
cudaStream_t stream)
{
if (cache_set_num_run > 0) {
UpdateCacheDirectKernel<IndexT>
<<<cache_set_num_run, 32, 0, stream>>>(unique_cache_set_lid,
unique_cache_set_start,
unique_cache_set_count,
static_cast<const IndexT*>(unique_indices),
unique_indices_count,
local_cache_line_tag,
local_cache_line_lfu_count,
local_access_count,
local_cache_line_data,
local_memory_data,
embedding_dim_in_int4,
rank_start_gid,
cache_set_coverage);
}
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(UpdateCacheDirectTempFunc, UpdateCacheDirectTempFunc, SINT3264)
wholememory_error_code_t update_cache_direct_same_comm(
void* indices,
wholememory_array_description_t indice_desc,
wholememory_tensor_t wm_raw_memory_embedding,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
wm_thrust_allocator thrust_allocator(p_env_fns);
int world_size = 1;
int world_rank = 0;
wholememory_handle_t wholememory_handle =
wholememory_tensor_get_memory_handle(wm_raw_memory_embedding);
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&wm_comm, wholememory_handle));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, wm_comm));
auto* raw_embedding_desc =
wholememory_tensor_get_tensor_description(wholememory_tensor_get_root(wm_raw_memory_embedding));
size_t embedding_entry_count_per_rank = 0;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_determine_entry_partition_plan(
&embedding_entry_count_per_rank, raw_embedding_desc->sizes[0], world_size));
int indices_num_run = 0;
temp_memory_handle unique_indice_handle(p_env_fns), unique_count_handle(p_env_fns);
try {
DISPATCH_ONE_TYPE(indice_desc.dtype,
SortUniqueLocalIndicesTempFunc,
indices,
indice_desc,
&indices_num_run,
&unique_indice_handle,
&unique_count_handle,
&thrust_allocator,
p_env_fns,
stream);
} catch (...) {
WHOLEMEMORY_ERROR("SortUniqueLocalIndicesTempFunc failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
temp_memory_handle unique_cache_set_lid_handle(p_env_fns),
unique_cache_set_start_handle(p_env_fns), unique_cache_set_count_handle(p_env_fns);
int cache_set_num_run;
DISPATCH_ONE_TYPE(indice_desc.dtype,
BucketByCacheSetTempFunc,
unique_indice_handle.pointer(),
indices_num_run,
&unique_cache_set_lid_handle,
&unique_cache_set_start_handle,
&unique_cache_set_count_handle,
&cache_set_num_run,
world_rank * embedding_entry_count_per_rank,
cache_set_coverage,
&thrust_allocator,
p_env_fns,
stream);
int* unique_cache_set_lid = static_cast<int*>(unique_cache_set_lid_handle.pointer());
int* unique_cache_set_start = static_cast<int*>(unique_cache_set_start_handle.pointer());
int* unique_cache_set_count = static_cast<int*>(unique_cache_set_count_handle.pointer());
void* embedding_local_pointer = nullptr;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_get_local_memory(&embedding_local_pointer,
nullptr,
nullptr,
wholememory_tensor_get_memory_handle(wm_raw_memory_embedding)));
int const embedding_dim = raw_embedding_desc->strides[0];
size_t const dtype_size = wholememory_dtype_get_element_size(raw_embedding_desc->dtype);
WHOLEMEMORY_CHECK_NOTHROW(embedding_dim * dtype_size % 16 == 0);
int const embedding_dim_in_int4 = embedding_dim * dtype_size / 16;
DISPATCH_ONE_TYPE(
indice_desc.dtype,
UpdateCacheDirectTempFunc,
unique_cache_set_lid,
unique_cache_set_start,
unique_cache_set_count,
unique_indice_handle.pointer(),
static_cast<const int*>(unique_count_handle.pointer()),
static_cast<uint16_t*>(wholememory_tensor_get_data_pointer(cache_local_data->cache_line_tag_)),
static_cast<uint16_t*>(
wholememory_tensor_get_data_pointer(cache_local_data->cache_line_lfu_count_)),
static_cast<int64_t*>(wholememory_tensor_get_data_pointer(cache_local_data->access_count_)),
static_cast<int4*>(wholememory_tensor_get_data_pointer(cache_local_data->cache_line_data_)),
static_cast<int4*>(embedding_local_pointer),
embedding_dim_in_int4,
cache_set_num_run,
world_rank * embedding_entry_count_per_rank,
cache_set_coverage,
stream);
return WHOLEMEMORY_SUCCESS;
}
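// Same per-cache-set update as UpdateCacheDirectKernel, but the raw embedding data is not resident
// on this rank: the kernel only records which global ids must be loaded (output_global_load_gid)
// and the cache line index each of them should be written to (output_local_write_cache_index);
// the actual data movement is done afterwards with wholememory_gather/wholememory_scatter.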
template <typename IndexT>
__global__ void DetermineLoadCacheKernel(const int* unique_cache_set_lid,
const int* unique_cache_set_update_start,
const int* unique_cache_set_update_count,
const IndexT* unique_indices,
const int* unique_indices_count,
uint16_t* local_cache_line_tag,
uint16_t* local_cache_line_lfu_count,
int64_t* local_access_count,
IndexT* output_local_write_cache_index,
IndexT* output_global_load_gid,
int64_t rank_start_gid,
int cache_set_coverage)
{
static_assert(wholememory::embedding_cache_base::kCacheSetSize == 32);
int64_t const cache_set_lid = unique_cache_set_lid[blockIdx.x];
local_cache_line_tag += cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
local_cache_line_lfu_count += cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
local_access_count += cache_set_lid * cache_set_coverage;
CacheLineInfo cache_line_info;
cache_line_info.LoadInfo(local_cache_line_tag, local_cache_line_lfu_count);
int cache_set_update_start_idx = unique_cache_set_update_start[blockIdx.x];
int cache_set_update_count = unique_cache_set_update_count[blockIdx.x];
output_local_write_cache_index += cache_set_update_start_idx;
output_global_load_gid += cache_set_update_start_idx;
using Updater = wholememory_ops::CacheSetUpdater<IndexT>;
Updater updater;
__shared__ typename Updater::TempStorage temp_storage;
__shared__ IndexT s_load_to_cache_ids[CacheSetUpdater<IndexT>::kCacheSetSize];
s_load_to_cache_ids[threadIdx.x] = -1;
int old_cached_lid = cache_line_info.LocalID();
__syncthreads();
updater.template UpdateCache<true, false>(temp_storage,
cache_line_info,
local_access_count,
unique_indices + cache_set_update_start_idx,
unique_indices_count + cache_set_update_start_idx,
&s_load_to_cache_ids[0],
nullptr,
rank_start_gid + cache_set_coverage * cache_set_lid,
cache_set_update_count);
__syncthreads();
IndexT thread_node_id = s_load_to_cache_ids[threadIdx.x];
unsigned int valid_mask = __ballot_sync(0xFFFFFFFF, thread_node_id >= 0);
int need_load_count = __popc(valid_mask);
assert(valid_mask == (1ULL << need_load_count) - 1);
for (int i = 0; i < need_load_count; i++) {
IndexT load_gid = s_load_to_cache_ids[i];
int local_id = load_gid - rank_start_gid - cache_set_coverage * cache_set_lid;
int cache_line_idx = cache_line_info.KeyIndexSync(local_id);
assert(cache_line_idx >= 0 && cache_line_idx <= 32);
output_global_load_gid[i] = load_gid;
output_local_write_cache_index[i] =
cache_line_idx + cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
}
for (int i = need_load_count + threadIdx.x; i < cache_set_update_count; i += 32) {
output_global_load_gid[i] = output_local_write_cache_index[i] = -1;
}
cache_line_info.StoreInfo(local_cache_line_tag, local_cache_line_lfu_count);
}
template <typename IndexT>
void DetermineLoadCacheTempFunc(const int* unique_cache_set_lid,
const int* unique_cache_set_update_start,
const int* unique_cache_set_update_count,
const void* unique_indices,
const int* unique_indices_count,
uint16_t* local_cache_line_tag,
uint16_t* local_cache_line_lfu_count,
int64_t* local_access_count,
void* output_local_write_cache_index,
void* output_global_load_gid,
int64_t rank_start_gid,
int cache_set_coverage,
int cache_set_num_run,
cudaStream_t stream)
{
if (cache_set_num_run > 0) {
DetermineLoadCacheKernel<IndexT>
<<<cache_set_num_run, 32, 0, stream>>>(unique_cache_set_lid,
unique_cache_set_update_start,
unique_cache_set_update_count,
static_cast<const IndexT*>(unique_indices),
unique_indices_count,
local_cache_line_tag,
local_cache_line_lfu_count,
local_access_count,
static_cast<IndexT*>(output_local_write_cache_index),
static_cast<IndexT*>(output_global_load_gid),
rank_start_gid,
cache_set_coverage);
}
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(DetermineLoadCacheTempFunc, DetermineLoadCacheTempFunc, SINT3264)
wholememory_error_code_t update_cache_different_comm(
void* indices,
wholememory_array_description_t indice_desc,
wholememory_tensor_t wm_raw_memory_embedding,
wholememory_comm_t cache_comm,
size_t embedding_entry_count_per_cache_rank,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
wm_thrust_allocator thrust_allocator(p_env_fns);
int cache_world_size = 1;
int cache_world_rank = 0;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&cache_world_size, cache_comm));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&cache_world_rank, cache_comm));
int indices_num_run = 0;
temp_memory_handle unique_indice_handle(p_env_fns), unique_count_handle(p_env_fns);
try {
DISPATCH_ONE_TYPE(indice_desc.dtype,
SortUniqueLocalIndicesTempFunc,
indices,
indice_desc,
&indices_num_run,
&unique_indice_handle,
&unique_count_handle,
&thrust_allocator,
p_env_fns,
stream);
} catch (...) {
WHOLEMEMORY_ERROR("SortUniqueLocalIndicesTempFunc failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
temp_memory_handle unique_cache_set_lid_handle(p_env_fns),
unique_cache_set_start_handle(p_env_fns), unique_cache_set_count_handle(p_env_fns);
int cache_set_num_run;
DISPATCH_ONE_TYPE(indice_desc.dtype,
BucketByCacheSetTempFunc,
unique_indice_handle.pointer(),
indices_num_run,
&unique_cache_set_lid_handle,
&unique_cache_set_start_handle,
&unique_cache_set_count_handle,
&cache_set_num_run,
cache_world_rank * embedding_entry_count_per_cache_rank,
cache_set_coverage,
&thrust_allocator,
p_env_fns,
stream);
int* unique_cache_set_lid = static_cast<int*>(unique_cache_set_lid_handle.pointer());
int* unique_cache_set_start = static_cast<int*>(unique_cache_set_start_handle.pointer());
int* unique_cache_set_count = static_cast<int*>(unique_cache_set_count_handle.pointer());
temp_memory_handle global_load_gid_handle(p_env_fns), local_write_cache_index_handle(p_env_fns);
void* global_load_gid_ptr =
global_load_gid_handle.device_malloc(indices_num_run, indice_desc.dtype);
void* local_write_cache_index_ptr =
local_write_cache_index_handle.device_malloc(indices_num_run, indice_desc.dtype);
try {
DISPATCH_ONE_TYPE(
indice_desc.dtype,
DetermineLoadCacheTempFunc,
unique_cache_set_lid,
unique_cache_set_start,
unique_cache_set_count,
unique_indice_handle.pointer(),
static_cast<const int*>(unique_count_handle.pointer()),
static_cast<uint16_t*>(
wholememory_tensor_get_data_pointer(cache_local_data->cache_line_tag_)),
static_cast<uint16_t*>(
wholememory_tensor_get_data_pointer(cache_local_data->cache_line_lfu_count_)),
static_cast<int64_t*>(wholememory_tensor_get_data_pointer(cache_local_data->access_count_)),
local_write_cache_index_ptr,
global_load_gid_ptr,
cache_world_rank * embedding_entry_count_per_cache_rank,
cache_set_coverage,
cache_set_num_run,
stream);
} catch (...) {
WHOLEMEMORY_ERROR("DetermineLoadCacheTempFunc failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
temp_memory_handle temp_cache_buffer_handle(p_env_fns);
wholememory_tensor_description_t temp_cache_desc =
*wholememory_tensor_get_tensor_description(wm_raw_memory_embedding);
temp_cache_desc.storage_offset = 0;
temp_cache_desc.sizes[0] = indices_num_run;
void* temp_cache_ptr = temp_cache_buffer_handle.device_malloc(
wholememory_get_memory_element_count_from_tensor(&temp_cache_desc), temp_cache_desc.dtype);
wholememory_tensor_t temp_cache_tensor;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_make_tensor_from_pointer(&temp_cache_tensor, temp_cache_ptr, &temp_cache_desc));
wholememory_tensor_description_t cache_indice_desc;
wholememory_copy_array_desc_to_tensor(&cache_indice_desc, &indice_desc);
cache_indice_desc.sizes[0] = indices_num_run;
cache_indice_desc.storage_offset = 0;
wholememory_tensor_t gather_indice_tensor, scatter_indice_tensor;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_make_tensor_from_pointer(
&gather_indice_tensor, global_load_gid_ptr, &cache_indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_make_tensor_from_pointer(
&scatter_indice_tensor, local_write_cache_index_ptr, &cache_indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_gather(
wm_raw_memory_embedding, gather_indice_tensor, temp_cache_tensor, p_env_fns, stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_scatter(temp_cache_tensor,
scatter_indice_tensor,
cache_local_data->cache_line_data_,
p_env_fns,
stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(temp_cache_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(gather_indice_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(scatter_indice_tensor));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
return WHOLEMEMORY_SUCCESS;
}
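// One 32-thread block per local cache set: every valid and modified cache line is copied back to
// the local embedding memory, then the modified flag is cleared (or the whole cache line is
// invalidated when drop_all is true).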
__global__ void WriteBackCacheDirectKernel(uint16_t* local_cache_line_tag,
int4* local_cached_data,
int4* local_memory_data,
int embedding_dim_in_int4,
int cache_set_coverage,
bool drop_all)
{
static_assert(wholememory::embedding_cache_base::kCacheSetSize == 32);
int64_t const cache_set_lid = blockIdx.x;
local_cache_line_tag += cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize;
local_cached_data +=
cache_set_lid * wholememory::embedding_cache_base::kCacheSetSize * embedding_dim_in_int4;
local_memory_data += cache_set_lid * cache_set_coverage * embedding_dim_in_int4;
CacheLineInfo cache_line_info;
cache_line_info.LoadTag(local_cache_line_tag);
bool is_modified = cache_line_info.IsModified() && cache_line_info.IsValid();
auto modify_mask = __ballot_sync(0xFFFFFFFF, static_cast<int>(is_modified));
while (modify_mask != 0) {
int lane_idx = __ffs(modify_mask) - 1;
int local_id = __shfl_sync(0xFFFFFFFF, cache_line_info.LocalID(), lane_idx, 32);
for (int idx = threadIdx.x; idx < embedding_dim_in_int4; idx += 32) {
local_memory_data[local_id * embedding_dim_in_int4 + idx] =
local_cached_data[lane_idx * embedding_dim_in_int4 + idx];
}
modify_mask ^= (1 << lane_idx);
}
if (drop_all) {
cache_line_info.ClearCacheLine();
} else {
cache_line_info.ClearModify();
}
cache_line_info.StoreTag(local_cache_line_tag);
}
wholememory_error_code_t writeback_cache_direct_same_comm(
wholememory_tensor_t wm_raw_memory_embedding,
const wholememory::embedding_cache_local_data* cache_local_data,
int cache_set_coverage,
bool drop_all,
cudaStream_t stream)
{
int world_size = 1;
int world_rank = 0;
wholememory_handle_t wholememory_handle =
wholememory_tensor_get_memory_handle(wm_raw_memory_embedding);
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&wm_comm, wholememory_handle));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, wm_comm));
auto* raw_embedding_desc =
wholememory_tensor_get_tensor_description(wholememory_tensor_get_root(wm_raw_memory_embedding));
size_t embedding_entry_count_per_rank = 0;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_determine_entry_partition_plan(
&embedding_entry_count_per_rank, raw_embedding_desc->sizes[0], world_size));
WHOLEMEMORY_CHECK_NOTHROW(embedding_entry_count_per_rank % cache_set_coverage == 0);
wholememory_tensor_t raw_local_tensor;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(wm_raw_memory_embedding, &raw_local_tensor));
int cache_set_count = wholememory::div_rounding_up_unsafe(
wholememory_tensor_get_tensor_description(raw_local_tensor)->sizes[0], cache_set_coverage);
int const embedding_dim = raw_embedding_desc->strides[0];
size_t const dtype_size = wholememory_dtype_get_element_size(raw_embedding_desc->dtype);
WHOLEMEMORY_CHECK_NOTHROW(embedding_dim * dtype_size % 16 == 0);
int const embedding_dim_in_int4 = embedding_dim * dtype_size / 16;
if (cache_set_count > 0) {
WriteBackCacheDirectKernel<<<cache_set_count, 32, 0, stream>>>(
static_cast<uint16_t*>(
wholememory_tensor_get_data_pointer(cache_local_data->cache_line_tag_)),
static_cast<int4*>(wholememory_tensor_get_data_pointer(cache_local_data->cache_line_data_)),
static_cast<int4*>(wholememory_tensor_get_data_pointer(raw_local_tensor)),
embedding_dim_in_int4,
cache_set_coverage,
drop_all);
WM_CUDA_CHECK_NO_THROW(cudaGetLastError());
}
WM_CUDA_DEBUG_SYNC_STREAM(stream);
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(raw_local_tensor));
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.h"
#include "cuda_macros.hpp"
#include "error.hpp"
namespace wholememory_ops {
wholememory_error_code_t gather_integer_int32_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream);
wholememory_error_code_t gather_integer_int64_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream);
wholememory_error_code_t gather_floating_int32_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream);
wholememory_error_code_t gather_floating_int64_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream);
wholememory_error_code_t gather_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
try {
bool embedding_is_float = wholememory_dtype_is_floating_number(embedding_desc.dtype);
WHOLEMEMORY_CHECK(embedding_is_float ||
wholememory_dtype_is_integer_number(embedding_desc.dtype));
bool output_is_float = wholememory_dtype_is_floating_number(output_desc.dtype);
WHOLEMEMORY_CHECK(output_is_float || wholememory_dtype_is_integer_number(output_desc.dtype));
WHOLEMEMORY_EXPECTS(
embedding_is_float == output_is_float,
"embedding and output should be same number type, e.g. floating number or integer number.");
if (indices_desc.size == 0) { return WHOLEMEMORY_SUCCESS; }
wholememory_error_code_t (*p_gather_func)(wholememory_gref_t,
wholememory_matrix_description_t,
void* indices,
wholememory_array_description_t,
void*,
wholememory_matrix_description_t,
cudaStream_t) = nullptr;
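    // Pick the concrete instantiation by embedding numeric class (floating vs. integer) and index
    // width; only int32 and int64 indices are supported.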
if (embedding_is_float) {
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
p_gather_func = gather_floating_int32_func;
} else {
p_gather_func = gather_floating_int64_func;
}
} else {
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
p_gather_func = gather_integer_int32_func;
} else {
p_gather_func = gather_integer_int64_func;
}
}
return p_gather_func(
embedding_gref, embedding_desc, indices, indices_desc, output, output_desc, stream);
  } catch (const wholememory::cuda_error& rle) {
    return WHOLEMEMORY_CUDA_ERROR;
} catch (const wholememory::logic_error& le) {
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/exchange_ids_nccl_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "exchange_ids_nccl_func.h"
#include <cub/device/device_radix_sort.cuh>
#include <thrust/sequence.h>
#include "bucket_ids_func.h"
#include "error.hpp"
#include "logger.hpp"
#include "wholememory/communicator.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename IndexT>
struct UnsignedType {};
template <>
struct UnsignedType<int> {
using UType = unsigned int;
};
template <>
struct UnsignedType<int64_t> {
using UType = uint64_t;
};
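// Sort (index, original position) pairs so that indices owned by the same rank become contiguous,
// then exchange the sorted indices to their owner ranks with alltoallv. raw_indices keeps the
// original position of every sorted index so that results can later be mapped back to the
// original input order.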
template <typename IndexT>
void exchange_ids_temp_func(const void* indices_before_sort,
wholememory_array_description_t indices_desc,
const int64_t* host_recv_rank_id_count_ptr,
const int64_t* host_rank_id_count_ptr,
const int64_t* host_rank_id_offset_ptr,
temp_memory_handle* dev_recv_indice_buffer,
void* indices_after_sort,
int64_t* raw_indices,
wholememory_comm_t wm_comm,
wm_thrust_allocator* p_thrust_allocator,
cudaStream_t stream)
{
auto index_type = indices_desc.dtype;
WHOLEMEMORY_CHECK(indices_desc.storage_offset == 0);
WHOLEMEMORY_CHECK(index_type == WHOLEMEMORY_DT_INT || index_type == WHOLEMEMORY_DT_INT64);
wm_thrust_allocator& allocator = *p_thrust_allocator;
int64_t* seq_indices = reinterpret_cast<int64_t*>(allocator.allocate(
wholememory_get_memory_element_count_from_array(&indices_desc) * sizeof(int64_t)));
thrust::sequence(
thrust::cuda::par(allocator).on(stream), seq_indices, seq_indices + indices_desc.size, 0);
  // Sort as the unsigned type so that negative (invalid) indices are placed at the end.
using UTypeT = typename UnsignedType<IndexT>::UType;
const UTypeT* indices_to_sort = static_cast<const UTypeT*>(indices_before_sort);
UTypeT* sorted_indice = static_cast<UTypeT*>(indices_after_sort);
void* cub_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(cub_temp_storage,
temp_storage_bytes,
indices_to_sort,
sorted_indice,
seq_indices,
raw_indices,
indices_desc.size,
0,
sizeof(UTypeT) * 8,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(cub_temp_storage,
temp_storage_bytes,
indices_to_sort,
sorted_indice,
seq_indices,
raw_indices,
indices_desc.size,
0,
sizeof(UTypeT) * 8,
stream);
int64_t total_recv_count = 0;
int world_size;
WHOLEMEMORY_CHECK(wholememory_communicator_get_size(&world_size, wm_comm) == WHOLEMEMORY_SUCCESS);
std::vector<size_t> host_recv_offset(world_size);
for (int i = 0; i < world_size; i++) {
host_recv_offset[i] = total_recv_count;
total_recv_count += host_recv_rank_id_count_ptr[i];
}
IndexT* dev_recv_indice_buffer_ptr =
static_cast<IndexT*>(dev_recv_indice_buffer->device_malloc(total_recv_count, index_type));
wm_comm->alltoallv(sorted_indice,
dev_recv_indice_buffer_ptr,
reinterpret_cast<const size_t*>(host_rank_id_count_ptr),
reinterpret_cast<const size_t*>(host_rank_id_offset_ptr),
reinterpret_cast<const size_t*>(host_recv_rank_id_count_ptr),
host_recv_offset.data(),
index_type,
stream);
wm_comm->sync_stream(stream);
allocator.deallocate(reinterpret_cast<char*>(seq_indices),
wholememory_get_memory_size_from_array(&indices_desc));
allocator.deallocate(static_cast<char*>(cub_temp_storage), temp_storage_bytes);
}
REGISTER_DISPATCH_ONE_TYPE(ExchangeIDsNCCL, exchange_ids_temp_func, SINT3264)
wholememory_error_code_t exchange_ids_func(const void* indices_before_sort,
wholememory_array_description_t indices_desc,
const int64_t* host_recv_rank_id_count_ptr,
const int64_t* host_rank_id_count_ptr,
const int64_t* host_rank_id_offset_ptr,
temp_memory_handle* dev_recv_indices_buffer_handle,
void* indices_after_sort,
int64_t* raw_indices,
wholememory_comm_t wm_comm,
wm_thrust_allocator* p_thrust_allocator,
cudaStream_t stream)
{
try {
DISPATCH_ONE_TYPE(indices_desc.dtype,
ExchangeIDsNCCL,
indices_before_sort,
indices_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
host_rank_id_offset_ptr,
dev_recv_indices_buffer_handle,
indices_after_sort,
raw_indices,
wm_comm,
p_thrust_allocator,
stream);
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("exchange_ids_func CUDA LOGIC Error %s\n", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("exchange_ids_func LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t bucket_and_exchange_ids_func(
void* indices,
wholememory_array_description_t indice_desc,
int64_t* host_recv_rank_id_count_ptr,
int64_t* host_rank_id_count_ptr,
temp_memory_handle* dev_recv_indices_buffer_handle,
int64_t* dev_raw_indice_ptr,
size_t embedding_entry_count_per_rank,
wholememory_comm_t wm_comm,
wm_thrust_allocator* p_thrust_allocator,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
int world_size;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
// Exchange node count
temp_memory_handle dev_rank_id_count(p_env_fns);
int64_t* dev_rank_id_count_ptr =
static_cast<int64_t*>(dev_rank_id_count.device_malloc(world_size, WHOLEMEMORY_DT_INT64));
WHOLEMEMORY_RETURN_ON_FAIL(bucket_ids_for_ranks(indices,
indice_desc,
dev_rank_id_count_ptr,
embedding_entry_count_per_rank,
world_size,
get_device_prop(-1),
stream));
WM_CUDA_CHECK(cudaGetLastError());
temp_memory_handle host_rank_id_offset(p_env_fns);
temp_memory_handle dev_sorted_indice(p_env_fns);
int64_t* host_rank_id_offset_ptr =
static_cast<int64_t*>(host_rank_id_offset.host_malloc(world_size + 1, WHOLEMEMORY_DT_INT64));
void* dev_sorted_indice_ptr =
dev_sorted_indice.device_malloc(indice_desc.size, indice_desc.dtype);
WM_CUDA_CHECK(cudaMemcpyAsync(host_rank_id_count_ptr,
dev_rank_id_count_ptr,
sizeof(int64_t) * world_size,
cudaMemcpyDeviceToHost,
stream));
WM_CUDA_CHECK(cudaGetLastError());
WM_CUDA_CHECK(cudaStreamSynchronize(stream));
wm_comm->host_alltoall(
host_rank_id_count_ptr, host_recv_rank_id_count_ptr, 1, WHOLEMEMORY_DT_INT64);
host_rank_id_offset_ptr[0] = 0;
for (int i = 0; i < world_size; i++) {
host_rank_id_offset_ptr[i + 1] = host_rank_id_offset_ptr[i] + host_rank_id_count_ptr[i];
}
WHOLEMEMORY_EXPECTS(wm_comm->sync_stream() == WHOLEMEMORY_SUCCESS,
"Rank id count AllToAll failed.");
void* indice_ptr =
static_cast<char*>(indices) +
wholememory_dtype_get_element_size(indice_desc.dtype) * indice_desc.storage_offset;
// Exchange ids
WHOLEMEMORY_RETURN_ON_FAIL(exchange_ids_func(indice_ptr,
indice_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
host_rank_id_offset_ptr,
dev_recv_indices_buffer_handle,
dev_sorted_indice_ptr,
dev_raw_indice_ptr,
wm_comm,
p_thrust_allocator,
stream));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/bucket_ids_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bucket_ids_func.h"
#include <cassert>
#include <cstdint>
#include <wholememory/wholememory.h>
#include "cuda_macros.hpp"
#include "error.hpp"
#include "logger.hpp"
#include "wholememory/integer_utils.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
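// Per-block histogram: each block accumulates, in shared memory, how many of its indices fall into
// each rank's id range (rank = index / embedding_entry_count_per_rank), then atomically adds the
// partial counts into the global per-rank counters. Negative indices are skipped.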
template <typename IndexT>
__global__ void bucket_ids_for_ranks_kernel(const IndexT* indices,
size_t indice_count,
int64_t* dev_rank_id_count_ptr,
size_t embedding_entry_count_per_rank,
int world_size)
{
extern __shared__ int rank_count_shared[];
for (int idx = threadIdx.x; idx < world_size; idx += blockDim.x) {
rank_count_shared[idx] = 0;
}
__syncthreads();
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indice_count;
idx += blockDim.x * gridDim.x) {
IndexT node_idx = indices[idx];
if (node_idx < 0) continue;
int rank = node_idx / embedding_entry_count_per_rank;
assert(rank >= 0 && rank < world_size);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
atomicAdd_block(&rank_count_shared[rank], 1);
#else
atomicAdd(&rank_count_shared[rank], 1);
#endif
}
__syncthreads();
for (int idx = threadIdx.x; idx < world_size; idx += blockDim.x) {
atomicAdd(reinterpret_cast<unsigned long long*>(dev_rank_id_count_ptr) + idx,
static_cast<unsigned long long>(rank_count_shared[idx]));
}
}
template <typename IndexT>
void bucket_ids_for_ranks_temp_fn(void* indices,
wholememory_array_description_t indice_desc,
int64_t* dev_rank_id_count_ptr,
size_t embedding_entry_count_per_rank,
int world_size,
int sm_count,
cudaStream_t stream)
{
static constexpr int BLOCK_SIZE = 128;
int block_count = wholememory::div_rounding_up_unsafe(indice_desc.size, BLOCK_SIZE);
block_count = std::min(block_count, sm_count * 4);
IndexT* indices_ptr = static_cast<IndexT*>(indices);
indices_ptr += indice_desc.storage_offset;
bucket_ids_for_ranks_kernel<<<block_count, BLOCK_SIZE, sizeof(int) * world_size, stream>>>(
indices_ptr,
indice_desc.size,
dev_rank_id_count_ptr,
embedding_entry_count_per_rank,
world_size);
}
REGISTER_DISPATCH_ONE_TYPE(BucketIdForRanks, bucket_ids_for_ranks_temp_fn, SINT3264)
wholememory_error_code_t bucket_ids_for_ranks(void* indices,
wholememory_array_description_t indice_desc,
int64_t* dev_rank_id_count_ptr,
size_t embedding_entry_count_per_rank,
int world_size,
cudaDeviceProp* prop,
cudaStream_t stream)
{
try {
WM_CUDA_CHECK(cudaMemsetAsync(dev_rank_id_count_ptr, 0, sizeof(int64_t) * world_size, stream));
if (indice_desc.size == 0) { return WHOLEMEMORY_SUCCESS; }
constexpr int K_DEFAULT_SM_COUNT = 108;
int sm_count = (prop != nullptr) ? prop->multiProcessorCount : K_DEFAULT_SM_COUNT;
DISPATCH_ONE_TYPE(indice_desc.dtype,
BucketIdForRanks,
indices,
indice_desc,
dev_rank_id_count_ptr,
embedding_entry_count_per_rank,
world_size,
sm_count,
stream);
WM_CUDA_CHECK(cudaGetLastError());
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("bucket_ids_for_ranks CUDA LOGIC Error %s\n", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_func_impl_floating_data_int64_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename EmbeddingT, typename OutputT>
void gather_floating_int64_temp_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
int64_t indice_count,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
gather_temp_func<EmbeddingT, int64_t, OutputT>(
embedding_gref, embedding_desc, indices, indice_count, output, output_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(GatherFuncFloatingInt64,
gather_floating_int64_temp_func,
HALF_FLOAT_DOUBLE,
HALF_FLOAT_DOUBLE)
wholememory_error_code_t gather_floating_int64_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(output_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT64);
DISPATCH_TWO_TYPES(
embedding_desc.dtype,
output_desc.dtype,
GatherFuncFloatingInt64,
embedding_gref,
embedding_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
output,
output_desc,
stream);
} catch (const wholememory::cuda_error& wle) {
WHOLEMEMORY_ERROR("gather CUDA LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (const wholememory::logic_error& le) {
    WHOLEMEMORY_ERROR("gather LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/scatter_func_impl_integer_data_int64_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename InputT, typename EmbeddingT>
void scatter_integer_int64_temp_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
int64_t indice_count,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
scatter_temp_func<InputT, int64_t, EmbeddingT>(
input, input_desc, indices, indice_count, embedding_gref, embedding_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(ScatterFuncIntegerInt64,
scatter_integer_int64_temp_func,
ALLSINT,
ALLSINT)
wholememory_error_code_t scatter_integer_int64_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(input_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT64);
DISPATCH_TWO_TYPES(
input_desc.dtype,
embedding_desc.dtype,
ScatterFuncIntegerInt64,
input,
input_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
embedding_gref,
embedding_desc,
stream);
} catch (const wholememory::cuda_error& wle) {
WHOLEMEMORY_ERROR("scatter CUDA LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("scatter LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/embedding_optimizer_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
namespace wholememory_ops {
void set_memory_to_float_value(float* data_ptr, float value, size_t elt_count, cudaStream_t stream);
wholememory_error_code_t sgd_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float lr,
cudaStream_t stream);
wholememory_error_code_t lazy_adam_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
wholememory_tensor_t per_embedding_local_state,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float beta1,
float beta2,
bool adam_w,
float lr,
cudaStream_t stream);
wholememory_error_code_t ada_grad_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float lr,
cudaStream_t stream);
wholememory_error_code_t rms_prop_optimizer_step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
wholememory_tensor_t local_embedding_cache_tag,
wholememory_tensor_t local_embedding_cache_data,
wholememory_tensor_t per_element_local_state,
wholememory_tensor_t per_element_local_cache_tag,
wholememory_tensor_t per_element_local_cache_data,
int64_t local_entry_offset,
int cache_set_coverage,
float weight_decay,
float epsilon,
float alpha,
float lr,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_cached_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_cached_func.h"
#include "cuda_macros.hpp"
#include "embedding_cache_func.cuh"
#include "error.hpp"
#include "gather_scatter_func.cuh"
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
#include "wholememory/embedding_cache.hpp"
namespace wholememory_ops {
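// One 32-thread block gathers one embedding row: the cache line tag decides whether the row is
// read from the cached embedding or from the raw (padded) embedding; the row is staged through
// shared memory in int4 chunks and then converted to OutputT and written to the output row.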
template <typename EmbedT, typename OutputT, typename IndexT>
__global__ void gather_cached_kernel(wholememory_gref_t padded_embedding_gref,
int stride_in_int4,
int start_embedding_idx,
int embedding_size,
wholememory_gref_t cache_line_tag_gref,
wholememory_gref_t cached_embedding_gref,
const IndexT* input_indices,
OutputT* output,
int output_stride,
int cache_set_coverage,
int64_t cache_start_gid,
int64_t raw_start_gid)
{
IndexT entry_gid = input_indices[blockIdx.x];
IndexT fixed_cache_gid = entry_gid - cache_start_gid;
IndexT fixed_raw_gid = entry_gid - raw_start_gid;
IndexT cache_set_idx = fixed_cache_gid / cache_set_coverage;
int cache_set_lid = static_cast<int>(fixed_cache_gid - cache_set_idx * cache_set_coverage);
CacheLineInfo cache_line_info;
wholememory::device_reference<uint16_t> cache_line_tag_dev_ref(cache_line_tag_gref);
cache_line_info.LoadTag(&cache_line_tag_dev_ref[CacheLineInfo::kCacheSetSize * cache_set_idx]);
int cache_line_index = cache_line_info.KeyIndexSync(cache_set_lid);
int4* padded_embedding_ptr = nullptr;
__shared__ int4 s_embedding[32];
EmbedT* s_embedding_embed_t = reinterpret_cast<EmbedT*>(&s_embedding[0]);
wholememory::device_reference<int4> embedding_dev_ref(padded_embedding_gref);
wholememory::device_reference<int4> cached_embedding_dev_ref(cached_embedding_gref);
if (cache_line_index >= 0) {
padded_embedding_ptr = &cached_embedding_dev_ref[(static_cast<int64_t>(cache_set_idx) *
CacheLineInfo::kCacheSetSize +
cache_line_index) *
stride_in_int4];
} else {
padded_embedding_ptr = &embedding_dev_ref[static_cast<int64_t>(fixed_raw_gid) * stride_in_int4];
}
constexpr int EMBED_TYPE_SIZE = sizeof(EmbedT);
constexpr int EMBED_TYPE_COUNT_PER_INT4 = 16 / EMBED_TYPE_SIZE;
int start_int4_idx = EMBED_TYPE_SIZE * start_embedding_idx / 16;
int start_padding = start_embedding_idx % EMBED_TYPE_COUNT_PER_INT4;
int end_int4_idx = (EMBED_TYPE_SIZE * (start_embedding_idx + embedding_size) + 15) / 16;
int shared_start_idx = start_padding;
int output_start_idx = 0;
OutputT* output_embed_ptr = output + static_cast<int64_t>(blockIdx.x) * output_stride;
for (; start_int4_idx * EMBED_TYPE_COUNT_PER_INT4 < start_embedding_idx + embedding_size;
start_int4_idx += 32) {
int const int4_idx = start_int4_idx + threadIdx.x;
if (int4_idx < end_int4_idx) { s_embedding[threadIdx.x] = padded_embedding_ptr[int4_idx]; }
int shared_end_idx =
min(32 * EMBED_TYPE_COUNT_PER_INT4,
start_embedding_idx + embedding_size - start_int4_idx * EMBED_TYPE_COUNT_PER_INT4);
__syncthreads();
while (output_start_idx < embedding_size && shared_start_idx < shared_end_idx) {
if (shared_start_idx + threadIdx.x < shared_end_idx) {
OutputT output_value =
convert_type<EmbedT, OutputT>(s_embedding_embed_t[shared_start_idx + threadIdx.x]);
output_embed_ptr[output_start_idx + threadIdx.x] = output_value;
}
int const data_count = min(32, shared_end_idx - shared_start_idx);
output_start_idx += data_count;
shared_start_idx += data_count;
}
shared_start_idx = 0;
__syncthreads();
}
}
template <typename EmbedT, typename OutputT, typename IndexT>
void gather_cached_temp_func(wholememory_gref_t padded_embedding_gref,
wholememory_matrix_description_t embedding_desc,
wholememory_gref_t cached_embedding_gref,
wholememory_matrix_description_t cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* input_indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
int64_t raw_start_gid,
cudaStream_t stream)
{
int indice_count = indices_desc.size;
if (indice_count == 0) return;
WHOLEMEMORY_CHECK_NOTHROW(embedding_desc.stride == cached_embedding_desc.stride);
WHOLEMEMORY_CHECK_NOTHROW(embedding_desc.stride *
wholememory_dtype_get_element_size(embedding_desc.dtype) %
sizeof(int4) ==
0);
WHOLEMEMORY_CHECK_NOTHROW(embedding_desc.sizes[1] == output_desc.sizes[1]);
WHOLEMEMORY_CHECK_NOTHROW(indices_desc.size == output_desc.sizes[0]);
int stride_in_int4 =
embedding_desc.stride * wholememory_dtype_get_element_size(embedding_desc.dtype) / sizeof(int4);
int start_embedding_idx = embedding_desc.storage_offset;
int embedding_size = embedding_desc.sizes[1];
int output_stride = output_desc.stride;
gather_cached_kernel<EmbedT, OutputT, IndexT><<<indice_count, 32, 0, stream>>>(
padded_embedding_gref,
stride_in_int4,
start_embedding_idx,
embedding_size,
cache_line_tag_gref,
cached_embedding_gref,
static_cast<const IndexT*>(input_indices) + indices_desc.storage_offset,
static_cast<OutputT*>(output) + output_desc.storage_offset,
output_stride,
cache_set_coverage,
cache_start_gid,
raw_start_gid);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_THREE_TYPES(
GatherCachedFuncFloating, gather_cached_temp_func, ALLFLOAT, ALLFLOAT, SINT3264)
REGISTER_DISPATCH_THREE_TYPES(
GatherCachedFuncInteger, gather_cached_temp_func, ALLSINT, ALLSINT, SINT3264)
wholememory_error_code_t gather_cached_func(wholememory_gref_t padded_embedding_gref,
wholememory_tensor_description_t* embedding_desc,
wholememory_gref_t cached_embedding_gref,
wholememory_tensor_description_t* cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* indices,
wholememory_tensor_description_t* indices_desc,
void* output,
wholememory_tensor_description_t* output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
int64_t raw_start_gid,
cudaStream_t stream)
{
if (embedding_desc->dim != 2 || cached_embedding_desc->dim != 2 || indices_desc->dim != 1 ||
output_desc->dim != 2) {
WHOLEMEMORY_ERROR("tensor dim not right.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (embedding_desc->strides[1] != 1 || cached_embedding_desc->strides[1] != 1 ||
indices_desc->strides[0] != 1 || output_desc->strides[1] != 1) {
WHOLEMEMORY_ERROR("tensor stride of last dim should be 1.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (embedding_desc->strides[0] != cached_embedding_desc->strides[0]) {
WHOLEMEMORY_ERROR("padded_embedding and cached_embedding should have same strides[0].");
return WHOLEMEMORY_INVALID_VALUE;
}
if (embedding_desc->strides[0] * wholememory_dtype_get_element_size(embedding_desc->dtype) %
sizeof(int4) !=
0) {
WHOLEMEMORY_ERROR("embedding should be aligned to int4 (16 bytes).");
return WHOLEMEMORY_INVALID_INPUT;
}
if (embedding_desc->sizes[1] != output_desc->sizes[1]) {
    WHOLEMEMORY_ERROR("embedding size for embedding and output should be the same, %ld vs. %ld.",
embedding_desc->sizes[1],
output_desc->sizes[1]);
return WHOLEMEMORY_INVALID_INPUT;
}
if (indices_desc->dtype != WHOLEMEMORY_DT_INT64 && indices_desc->dtype != WHOLEMEMORY_DT_INT) {
WHOLEMEMORY_ERROR("indices should be int64 or int32.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (indices_desc->sizes[0] != output_desc->sizes[0]) {
WHOLEMEMORY_ERROR("indices size and output entry count should be the same.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (embedding_desc->dtype != cached_embedding_desc->dtype) {
WHOLEMEMORY_ERROR("embedding and cache should be same type");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_dtype_t const embedding_dtype = embedding_desc->dtype;
wholememory_dtype_t const output_dtype = output_desc->dtype;
if (wholememory_dtype_is_floating_number(embedding_dtype) &&
!wholememory_dtype_is_floating_number(output_dtype) ||
wholememory_dtype_is_integer_number(embedding_dtype) &&
!wholememory_dtype_is_integer_number(output_dtype)) {
WHOLEMEMORY_ERROR("embedding and output should be all float or all integer");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_matrix_description_t embedding_matrix_desc, cached_embedding_matrix_desc,
output_matrix_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_matrix(&embedding_matrix_desc, embedding_desc));
WHOLEMEMORY_CHECK_NOTHROW(wholememory_convert_tensor_desc_to_matrix(&cached_embedding_matrix_desc,
cached_embedding_desc));
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_matrix(&output_matrix_desc, output_desc));
wholememory_array_description_t indices_array_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_array(&indices_array_desc, indices_desc));
if (indices_array_desc.size == 0) return WHOLEMEMORY_SUCCESS;
if (wholememory_dtype_is_floating_number(embedding_dtype)) {
DISPATCH_THREE_TYPES(embedding_dtype,
output_dtype,
indices_desc->dtype,
GatherCachedFuncFloating,
padded_embedding_gref,
embedding_matrix_desc,
cached_embedding_gref,
cached_embedding_matrix_desc,
cache_line_tag_gref,
indices,
indices_array_desc,
output,
output_matrix_desc,
cache_set_coverage,
cache_start_gid,
raw_start_gid,
stream);
} else {
DISPATCH_THREE_TYPES(embedding_dtype,
output_dtype,
indices_desc->dtype,
GatherCachedFuncInteger,
padded_embedding_gref,
embedding_matrix_desc,
cached_embedding_gref,
cached_embedding_matrix_desc,
cache_line_tag_gref,
indices,
indices_array_desc,
output,
output_matrix_desc,
cache_set_coverage,
cache_start_gid,
raw_start_gid,
stream);
}
WM_CUDA_DEBUG_SYNC_STREAM(stream);
return WHOLEMEMORY_SUCCESS;
}
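// try_gather_cached_kernel: one thread block (32 threads) per index. Each block locates the cache
// set covering its gid, checks whether that gid is resident (via the cache line tags), records the
// gid into hit_indices or miss_indices accordingly (-1 in the other array), and, on a hit, copies
// the cached row into the output. Missed rows are left untouched for the caller to fetch.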
template <typename EmbedT, typename OutputT, typename IndexT>
__global__ void try_gather_cached_kernel(int stride_in_int4,
int start_embedding_idx,
int embedding_size,
wholememory_gref_t cache_line_tag_gref,
wholememory_gref_t cached_embedding_gref,
const IndexT* input_indices,
IndexT* hit_indices,
IndexT* miss_indices,
OutputT* output,
int output_stride,
int cache_set_coverage,
int64_t cache_start_gid)
{
IndexT entry_gid = input_indices[blockIdx.x];
IndexT fixed_cache_gid = entry_gid - cache_start_gid;
IndexT cache_set_idx = fixed_cache_gid / cache_set_coverage;
int cache_set_lid = static_cast<int>(fixed_cache_gid - cache_set_idx * cache_set_coverage);
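  // Worked example (illustrative values): with cache_start_gid = 1000 and cache_set_coverage = 64,
  // entry_gid = 1130 gives fixed_cache_gid = 130, cache_set_idx = 2 and cache_set_lid = 2,
  // i.e. the gid maps to local slot 2 of the third cache set.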
CacheLineInfo cache_line_info;
wholememory::device_reference<uint16_t> cache_line_tag_dev_ref(cache_line_tag_gref);
cache_line_info.LoadTag(&cache_line_tag_dev_ref[CacheLineInfo::kCacheSetSize * cache_set_idx]);
int cache_line_index = cache_line_info.KeyIndexSync(cache_set_lid);
int4* padded_embedding_ptr = nullptr;
__shared__ int4 s_embedding[32];
EmbedT* s_embedding_embed_t = reinterpret_cast<EmbedT*>(&s_embedding[0]);
wholememory::device_reference<int4> cached_embedding_dev_ref(cached_embedding_gref);
if (cache_line_index >= 0) {
padded_embedding_ptr =
&cached_embedding_dev_ref[static_cast<int64_t>(cache_set_idx * CacheLineInfo::kCacheSetSize +
cache_line_index) *
stride_in_int4];
}
if (threadIdx.x == 0) {
if (hit_indices) hit_indices[blockIdx.x] = cache_line_index >= 0 ? entry_gid : (IndexT)-1;
if (miss_indices) miss_indices[blockIdx.x] = cache_line_index >= 0 ? (IndexT)-1 : entry_gid;
}
if (cache_line_index < 0) return;
constexpr int EMBED_TYPE_SIZE = sizeof(EmbedT);
constexpr int EMBED_TYPE_COUNT_PER_INT4 = 16 / EMBED_TYPE_SIZE;
int start_int4_idx = EMBED_TYPE_SIZE * start_embedding_idx / 16;
int start_padding = start_embedding_idx % EMBED_TYPE_COUNT_PER_INT4;
int end_int4_idx = (EMBED_TYPE_SIZE * (start_embedding_idx + embedding_size) + 15) / 16;
int shared_start_idx = start_padding;
int output_start_idx = 0;
OutputT* output_embed_ptr = output + static_cast<int64_t>(blockIdx.x) * output_stride;
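  // Copy loop: each iteration stages up to 32 int4 (512 bytes) of the cached row into shared
  // memory, then the warp converts EmbedT -> OutputT and writes the valid elements to the output
  // row; start_padding accounts for a storage_offset that is not int4 aligned.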
for (; start_int4_idx * EMBED_TYPE_COUNT_PER_INT4 < start_embedding_idx + embedding_size;
start_int4_idx += 32) {
int const int4_idx = start_int4_idx + threadIdx.x;
if (int4_idx < end_int4_idx) { s_embedding[threadIdx.x] = padded_embedding_ptr[int4_idx]; }
int shared_end_idx =
min(32 * EMBED_TYPE_COUNT_PER_INT4,
start_embedding_idx + embedding_size - start_int4_idx * EMBED_TYPE_COUNT_PER_INT4);
__syncthreads();
while (output_start_idx < embedding_size && shared_start_idx < shared_end_idx) {
if (shared_start_idx + threadIdx.x < shared_end_idx) {
OutputT output_value =
convert_type<EmbedT, OutputT>(s_embedding_embed_t[shared_start_idx + threadIdx.x]);
output_embed_ptr[output_start_idx + threadIdx.x] = output_value;
}
int const data_count = min(32, shared_end_idx - shared_start_idx);
output_start_idx += data_count;
shared_start_idx += data_count;
}
shared_start_idx = 0;
__syncthreads();
}
}
template <typename EmbedT, typename OutputT, typename IndexT>
void try_gather_cached_temp_func(wholememory_gref_t cached_embedding_gref,
wholememory_matrix_description_t cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* input_indices,
wholememory_array_description_t indices_desc,
void* hit_indices,
void* miss_indices,
void* output,
wholememory_matrix_description_t output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
cudaStream_t stream)
{
  int indice_count = indices_desc.size;
  if (indice_count == 0) return;
WHOLEMEMORY_CHECK_NOTHROW(cached_embedding_desc.stride *
wholememory_dtype_get_element_size(cached_embedding_desc.dtype) %
sizeof(int4) ==
0);
WHOLEMEMORY_CHECK_NOTHROW(cached_embedding_desc.sizes[1] == output_desc.sizes[1]);
WHOLEMEMORY_CHECK_NOTHROW(indices_desc.size == output_desc.sizes[0]);
int stride_in_int4 = cached_embedding_desc.stride *
wholememory_dtype_get_element_size(cached_embedding_desc.dtype) /
sizeof(int4);
int start_embedding_idx = cached_embedding_desc.storage_offset;
int embedding_size = cached_embedding_desc.sizes[1];
int output_stride = output_desc.stride;
try_gather_cached_kernel<EmbedT, OutputT, IndexT><<<indice_count, 32, 0, stream>>>(
stride_in_int4,
start_embedding_idx,
embedding_size,
cache_line_tag_gref,
cached_embedding_gref,
static_cast<const IndexT*>(input_indices) + indices_desc.storage_offset,
static_cast<IndexT*>(hit_indices),
static_cast<IndexT*>(miss_indices),
static_cast<OutputT*>(output) + output_desc.storage_offset,
output_stride,
cache_set_coverage,
cache_start_gid);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_THREE_TYPES(
TryGatherCachedFuncFloating, try_gather_cached_temp_func, ALLFLOAT, ALLFLOAT, SINT3264)
REGISTER_DISPATCH_THREE_TYPES(
TryGatherCachedFuncInteger, try_gather_cached_temp_func, ALLSINT, ALLSINT, SINT3264)
wholememory_error_code_t try_gather_cached_func(
wholememory_gref_t cached_embedding_gref,
wholememory_tensor_description_t* cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* indices,
wholememory_tensor_description_t* indices_desc,
void* hit_indices,
void* miss_indices,
void* output,
wholememory_tensor_description_t* output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
cudaStream_t stream)
{
if (cached_embedding_desc->dim != 2 || indices_desc->dim != 1 || output_desc->dim != 2) {
WHOLEMEMORY_ERROR("tensor dim not right.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (cached_embedding_desc->strides[1] != 1 || indices_desc->strides[0] != 1 ||
output_desc->strides[1] != 1) {
WHOLEMEMORY_ERROR("tensor stride of last dim should be 1.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (cached_embedding_desc->strides[0] *
wholememory_dtype_get_element_size(cached_embedding_desc->dtype) % sizeof(int4) !=
0) {
WHOLEMEMORY_ERROR("cached_embedding_desc should be aligned to int4 (16 bytes).");
return WHOLEMEMORY_INVALID_INPUT;
}
if (cached_embedding_desc->sizes[1] != output_desc->sizes[1]) {
WHOLEMEMORY_ERROR("embedding size for embedding and output should be same.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (indices_desc->dtype != WHOLEMEMORY_DT_INT64 && indices_desc->dtype != WHOLEMEMORY_DT_INT) {
WHOLEMEMORY_ERROR("indices should be int64 or int32.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (indices_desc->sizes[0] != output_desc->sizes[0]) {
WHOLEMEMORY_ERROR("indices size and output entry count should be the same.");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_dtype_t const embedding_dtype = cached_embedding_desc->dtype;
wholememory_dtype_t const output_dtype = output_desc->dtype;
if (wholememory_dtype_is_floating_number(embedding_dtype) &&
!wholememory_dtype_is_floating_number(output_dtype) ||
wholememory_dtype_is_integer_number(embedding_dtype) &&
!wholememory_dtype_is_integer_number(output_dtype)) {
WHOLEMEMORY_ERROR("embedding and output should be all float or all integer");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_matrix_description_t embedding_matrix_desc, cached_embedding_matrix_desc,
output_matrix_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_matrix(&embedding_matrix_desc, cached_embedding_desc));
WHOLEMEMORY_CHECK_NOTHROW(wholememory_convert_tensor_desc_to_matrix(&cached_embedding_matrix_desc,
cached_embedding_desc));
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_matrix(&output_matrix_desc, output_desc));
wholememory_array_description_t indices_array_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_array(&indices_array_desc, indices_desc));
if (indices_array_desc.size == 0) return WHOLEMEMORY_SUCCESS;
if (wholememory_dtype_is_floating_number(embedding_dtype)) {
DISPATCH_THREE_TYPES(embedding_dtype,
output_dtype,
indices_desc->dtype,
TryGatherCachedFuncFloating,
cached_embedding_gref,
cached_embedding_matrix_desc,
cache_line_tag_gref,
indices,
indices_array_desc,
hit_indices,
miss_indices,
output,
output_matrix_desc,
cache_set_coverage,
cache_start_gid,
stream);
} else {
DISPATCH_THREE_TYPES(embedding_dtype,
output_dtype,
indices_desc->dtype,
TryGatherCachedFuncInteger,
cached_embedding_gref,
cached_embedding_matrix_desc,
cache_line_tag_gref,
indices,
indices_array_desc,
hit_indices,
miss_indices,
output,
output_matrix_desc,
cache_set_coverage,
cache_start_gid,
stream);
}
WM_CUDA_DEBUG_SYNC_STREAM(stream);
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_scatter_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/global_reference.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
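// gather_func: gathers rows of the WholeMemory embedding referenced by embedding_gref at the given
// indices into the local output buffer, i.e. roughly output[i, :] = embedding[indices[i], :].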
wholememory_error_code_t gather_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream);
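// scatter_func: the inverse operation, writes rows of the local input buffer into the WholeMemory
// embedding at the given indices, i.e. roughly embedding[indices[i], :] = input[i, :].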
wholememory_error_code_t scatter_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/scatter_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.h"
#include "cuda_macros.hpp"
#include "error.hpp"
#include "logger.hpp"
namespace wholememory_ops {
wholememory_error_code_t scatter_integer_int32_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
wholememory_error_code_t scatter_integer_int64_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
wholememory_error_code_t scatter_floating_int32_func(
const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
wholememory_error_code_t scatter_floating_int64_func(
const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream);
wholememory_error_code_t scatter_func(const void* input,
wholememory_matrix_description_t input_desc,
void* indices,
wholememory_array_description_t indices_desc,
wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
cudaStream_t stream)
{
try {
bool embedding_is_float = wholememory_dtype_is_floating_number(embedding_desc.dtype);
WHOLEMEMORY_CHECK(embedding_is_float ||
wholememory_dtype_is_integer_number(embedding_desc.dtype));
bool input_is_float = wholememory_dtype_is_floating_number(input_desc.dtype);
WHOLEMEMORY_CHECK(input_is_float || wholememory_dtype_is_integer_number(input_desc.dtype));
WHOLEMEMORY_EXPECTS(
embedding_is_float == input_is_float,
"embedding and output should be same number type, e.g. floating number or integer number.");
if (indices_desc.size == 0) { return WHOLEMEMORY_SUCCESS; }
wholememory_error_code_t (*p_scatter_func)(const void*,
wholememory_matrix_description_t,
void*,
wholememory_array_description_t,
wholememory_gref_t,
wholememory_matrix_description_t,
cudaStream_t) = nullptr;
if (embedding_is_float) {
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
p_scatter_func = scatter_floating_int32_func;
} else {
p_scatter_func = scatter_floating_int64_func;
}
} else {
if (indices_desc.dtype == WHOLEMEMORY_DT_INT) {
p_scatter_func = scatter_integer_int32_func;
} else {
p_scatter_func = scatter_integer_int64_func;
}
}
return p_scatter_func(
input, input_desc, indices, indices_desc, embedding_gref, embedding_desc, stream);
} catch (const wholememory::cuda_error& wle) {
WHOLEMEMORY_ERROR("scatter CUDA LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("scatter LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_func_impl_floating_data_int32_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename EmbeddingT, typename OutputT>
void gather_floating_int32_temp_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
int64_t indice_count,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
gather_temp_func<EmbeddingT, int32_t, OutputT>(
embedding_gref, embedding_desc, indices, indice_count, output, output_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(GatherFuncFloatingInt32,
gather_floating_int32_temp_func,
HALF_FLOAT_DOUBLE,
HALF_FLOAT_DOUBLE)
wholememory_error_code_t gather_floating_int32_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_floating_number(output_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT);
DISPATCH_TWO_TYPES(
embedding_desc.dtype,
output_desc.dtype,
GatherFuncFloatingInt32,
embedding_gref,
embedding_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
output,
output_desc,
stream);
} catch (const wholememory::cuda_error& wle) {
WHOLEMEMORY_ERROR("gather CUDA LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("gather CUDA LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/exchange_embeddings_nccl_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include <wholememory_ops/temp_memory_handle.hpp>
#include <wholememory_ops/thrust_allocator.hpp>
namespace wholememory_ops {
/**
* Exchange embeddings between ranks
* @param dev_local_gather_buffer_ptr : local buffer to send
 * @param host_send_to_rank_count_ptr : count of ids the current rank sends to each rank
 * @param host_recv_from_rank_count_ptr : count of ids the current rank receives from each rank
* @param dev_embedding_recv_buffer_ptr : local buffer to receive embedding data
* @param embedding_size : embedding size in bytes.
* @param wm_comm : WholeMemory communicator
* @param stream : CUDA stream to use
* @return : WHOLEMEMORY_SUCCESS on success, others on failure.
*/
wholememory_error_code_t exchange_embeddings_nccl_func(const void* dev_local_gather_buffer_ptr,
const int64_t* host_send_to_rank_count_ptr,
const int64_t* host_recv_from_rank_count_ptr,
void* dev_embedding_recv_buffer_ptr,
size_t embedding_size,
wholememory_comm_t wm_comm,
cudaStream_t stream);
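// Minimal call sketch (illustrative only; dev_send_buf, dev_recv_buf, embedding_dim, wm_comm and
// stream are placeholder names, and the per-rank count exchange plus buffer allocation are assumed
// to be done by the caller beforehand):
//
//   std::vector<int64_t> send_counts(world_size), recv_counts(world_size);
//   // ... fill send_counts / recv_counts, allocate dev_send_buf / dev_recv_buf accordingly ...
//   WHOLEMEMORY_RETURN_ON_FAIL(exchange_embeddings_nccl_func(
//     dev_send_buf, send_counts.data(), recv_counts.data(), dev_recv_buf,
//     embedding_dim * sizeof(float), wm_comm, stream));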
/**
* Dedup indice and gradients
* @param indices : indices
 * @param indice_desc : array description of indices
* @param grads : gradients
* @param grads_desc : matrix description of gradients
* @param dedup_indice : output indice
* @param dedup_grads : output gradients
* @param p_env_fn : env_fns
* @param stream : CUDA stream to use
 * @return : deduped (unique) index count
*/
int64_t dedup_indice_and_gradients(const void* indices,
wholememory_array_description_t indice_desc,
const float* grads,
wholememory_matrix_description_t grads_desc,
void* dedup_indice,
float* dedup_grads,
wholememory_env_func_t* p_env_fn,
cudaStream_t stream);
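// Note: the output buffers must be sized by the caller for the worst case (no duplicates), i.e.
// dedup_indice must hold up to indice_desc.size ids and dedup_grads up to indice_desc.size rows;
// the returned value is the actual number of unique ids written.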
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/exchange_embeddings_nccl_func.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "exchange_embeddings_nccl_func.h"
#include <vector>
#include <cub/device/device_radix_sort.cuh>
#include <thrust/sequence.h>
#include <thrust/unique.h>
#include <wholememory/communicator.hpp>
#include "cuda_macros.hpp"
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
wholememory_error_code_t exchange_embeddings_nccl_func(const void* dev_local_gather_buffer_ptr,
const int64_t* host_send_to_rank_count_ptr,
const int64_t* host_recv_from_rank_count_ptr,
void* dev_embedding_recv_buffer_ptr,
size_t embedding_size,
wholememory_comm_t wm_comm,
cudaStream_t stream)
{
try {
int world_size;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_comm));
std::vector<size_t> embedding_send_counts(world_size), embedding_send_displs(world_size);
std::vector<size_t> embedding_recv_counts(world_size), embedding_recv_displs(world_size);
size_t send_disp = 0, recv_disp = 0;
for (int i = 0; i < world_size; i++) {
embedding_send_displs[i] = send_disp;
embedding_recv_displs[i] = recv_disp;
size_t send_count = host_send_to_rank_count_ptr[i] * embedding_size;
size_t recv_count = host_recv_from_rank_count_ptr[i] * embedding_size;
embedding_send_counts[i] = send_count;
embedding_recv_counts[i] = recv_count;
send_disp += send_count;
recv_disp += recv_count;
}
wm_comm->alltoallv(dev_local_gather_buffer_ptr,
dev_embedding_recv_buffer_ptr,
embedding_send_counts.data(),
embedding_send_displs.data(),
embedding_recv_counts.data(),
embedding_recv_displs.data(),
WHOLEMEMORY_DT_INT8,
stream);
WM_CUDA_DEBUG_SYNC_STREAM(stream);
WHOLEMEMORY_EXPECTS(wm_comm->sync_stream(stream) == WHOLEMEMORY_SUCCESS,
"Embedding AllToAllV failed.");
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("exchange_embeddings_nccl_func LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
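// DedupIndiceAndGradientsKernel: one block per unique index. start_pos[bidx] and start_pos[bidx + 1]
// (or raw_count for the last block) delimit the run of sorted positions that share this unique
// index, mapping_array maps each sorted position back to its original row in grads, and the block
// accumulates all of those gradient rows into dedup_grads[bidx].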
__global__ void DedupIndiceAndGradientsKernel(int raw_count,
int unique_count,
const int* start_pos,
const int* mapping_array,
const float* grads,
float* dedup_grads,
int embedding_dim,
int embedding_stride)
{
int bidx = blockIdx.x;
int start_offset = start_pos[bidx];
int end_offset = raw_count;
if (bidx != unique_count - 1) end_offset = start_pos[bidx + 1];
for (int idx = start_offset; idx < end_offset; idx++) {
int map_idx = mapping_array[idx];
const float* current_grads_ptr = grads + map_idx * embedding_stride;
float* current_dedup_grads_ptr = dedup_grads + bidx * embedding_stride;
if (idx == start_offset) {
for (int dim = threadIdx.x; dim < embedding_dim; dim += blockDim.x) {
current_dedup_grads_ptr[dim] = current_grads_ptr[dim];
}
} else {
for (int dim = threadIdx.x; dim < embedding_dim; dim += blockDim.x) {
current_dedup_grads_ptr[dim] += current_grads_ptr[dim];
}
}
}
}
template <typename IndexT>
void dedup_indice_and_gradients_temp_func(int64_t* run_count,
const void* indices_ptr,
wholememory_array_description_t indice_desc,
const float* grads,
wholememory_matrix_description_t grads_desc,
void* dedup_indice_ptr,
float* dedup_grads,
wholememory_env_func_t* p_env_fn,
cudaStream_t stream)
{
const IndexT* indice = static_cast<const IndexT*>(indices_ptr);
IndexT* dedup_indice = static_cast<IndexT*>(dedup_indice_ptr);
int raw_count = indice_desc.size;
if (raw_count == 0) {
*run_count = 0;
return;
}
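  // Pipeline: (1) sort (index, original-row) pairs by index with cub radix sort, writing the
  // sorted indices directly into dedup_indice; (2) unique_by_key compacts the sorted indices and
  // leaves, for each unique index, the start position of its run; (3) the kernel above sums the
  // gradient rows of each run into the corresponding dedup_grads row.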
IndexT* sorted_indice = dedup_indice;
wm_thrust_allocator allocator(p_env_fn);
wholememory_ops::temp_memory_handle mapping_sequence_handle(p_env_fn);
int* dev_mapping_sequence =
static_cast<int*>(mapping_sequence_handle.device_malloc(raw_count * 2, WHOLEMEMORY_DT_INT));
int* dev_indice_mapping = dev_mapping_sequence + raw_count;
thrust::sequence(thrust::cuda::par(allocator).on(stream),
dev_mapping_sequence,
dev_mapping_sequence + raw_count,
0);
void* cub_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(cub_temp_storage,
temp_storage_bytes,
indice,
sorted_indice,
dev_mapping_sequence,
dev_indice_mapping,
raw_count,
0,
sizeof(IndexT) * 8,
stream);
cub_temp_storage = allocator.allocate(temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(cub_temp_storage,
temp_storage_bytes,
indice,
sorted_indice,
dev_mapping_sequence,
dev_indice_mapping,
raw_count,
0,
sizeof(IndexT) * 8,
stream);
auto thrust_ret = thrust::unique_by_key(thrust::cuda::par(allocator).on(stream),
sorted_indice,
sorted_indice + raw_count,
dev_mapping_sequence);
*run_count = thrust_ret.first - sorted_indice;
int embedding_dim = grads_desc.sizes[1];
int embedding_stride = grads_desc.stride;
int thread_count = std::min<int>(embedding_dim, 256);
DedupIndiceAndGradientsKernel<<<*run_count, thread_count, 0, stream>>>(raw_count,
*run_count,
dev_mapping_sequence,
dev_indice_mapping,
grads,
dedup_grads,
embedding_dim,
embedding_stride);
WM_CUDA_CHECK_NO_THROW(cudaGetLastError());
WM_CUDA_DEBUG_SYNC_STREAM(stream);
}
REGISTER_DISPATCH_ONE_TYPE(DedupIndiceAndGradientsTempFunc,
dedup_indice_and_gradients_temp_func,
SINT3264)
int64_t dedup_indice_and_gradients(const void* indices,
wholememory_array_description_t indice_desc,
const float* grads,
wholememory_matrix_description_t grads_desc,
void* dedup_indice,
float* dedup_grads,
wholememory_env_func_t* p_env_fn,
cudaStream_t stream)
{
WHOLEMEMORY_CHECK_NOTHROW(indice_desc.dtype == WHOLEMEMORY_DT_INT ||
indice_desc.dtype == WHOLEMEMORY_DT_INT64);
WHOLEMEMORY_CHECK_NOTHROW(indice_desc.size == grads_desc.sizes[0]);
WHOLEMEMORY_CHECK_NOTHROW(grads_desc.dtype == WHOLEMEMORY_DT_FLOAT);
int64_t run_count = 0;
DISPATCH_ONE_TYPE(indice_desc.dtype,
DedupIndiceAndGradientsTempFunc,
&run_count,
indices,
indice_desc,
grads,
grads_desc,
dedup_indice,
dedup_grads,
p_env_fn,
stream);
return run_count;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/embedding_cache_func.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdint.h>
#include <raft/matrix/detail/select_k-inl.cuh>
namespace wholememory_ops {
__device__ __forceinline__ unsigned int WarpMatchLocalIDPairSync(int targets, int key)
{
int xor_target = __shfl_xor_sync(0xFFFFFFFF, targets, 0x10);
bool first_half_lane = (threadIdx.x & 0x10) == 0;
int match_value = 0;
unsigned int match_flag, tmp_match_flag;
match_value = first_half_lane ? key : xor_target;
tmp_match_flag = __match_any_sync(0xFFFFFFFF, match_value);
if (first_half_lane) match_flag = tmp_match_flag >> 16;
match_value = first_half_lane ? key : targets;
tmp_match_flag = __match_any_sync(0xFFFFFFFF, match_value);
if (first_half_lane) match_flag |= (tmp_match_flag & 0xFFFF0000);
match_value = !first_half_lane ? key : targets;
tmp_match_flag = __match_any_sync(0xFFFFFFFF, match_value);
if (!first_half_lane) match_flag = (tmp_match_flag & 0xFFFF);
match_value = !first_half_lane ? key : xor_target;
tmp_match_flag = __match_any_sync(0xFFFFFFFF, match_value);
if (!first_half_lane) match_flag |= (tmp_match_flag & 0xFFFF) << 16;
return match_flag;
}
__device__ __forceinline__ int WarpFindMaxScaleSync(int scale)
{
#if __CUDA_ARCH__ >= 800
return __reduce_max_sync(0xFFFFFFFF, scale);
#else
for (int delta = 16; delta > 0; delta /= 2) {
scale = max(__shfl_down_sync(0xFFFFFFFF, scale, delta, 32), scale);
}
return __shfl_sync(0xFFFFFFFF, scale, 0, 32);
#endif
}
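// CacheLineInfo: per-thread view of one cache line inside a 32-way cache set (one warp manages one
// set, one lane per cache line). tag_ packs the cached local id in bits [0,13], a valid bit (14)
// and a modified/dirty bit (15). lfu_count_ keeps a scaled LFU counter: bits [0,13] hold
// count >> scale and bit 14 holds this lane's bit of the per-set scale, which ScaleSync()
// reassembles with a warp ballot.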
class CacheLineInfo {
public:
__device__ __forceinline__ CacheLineInfo() {}
__device__ __forceinline__ void LoadTag(const uint16_t* tag_ptr) { tag_ = tag_ptr[threadIdx.x]; }
__device__ __forceinline__ void LoadInfo(const uint16_t* tag_ptr, const uint16_t* count_ptr)
{
tag_ = tag_ptr[threadIdx.x];
lfu_count_ = count_ptr[threadIdx.x];
}
__device__ __forceinline__ void StoreTag(uint16_t* tag_ptr) const { tag_ptr[threadIdx.x] = tag_; }
__device__ __forceinline__ void StoreInfo(uint16_t* tag_ptr, uint16_t* count_ptr) const
{
tag_ptr[threadIdx.x] = tag_;
count_ptr[threadIdx.x] = lfu_count_;
}
__device__ __forceinline__ bool IsValid() const { return (tag_ & kValidMask) != 0U; }
__device__ __forceinline__ bool IsInValid() const { return !IsValid(); }
__device__ __forceinline__ bool IsModified() const { return (tag_ & kModifiedMask) != 0U; };
__device__ __forceinline__ int LocalID() const
{
return IsValid() ? (int)(tag_ & kLocalIDMask) : -1;
}
__device__ __forceinline__ int ScaleSync() const
{
return __ballot_sync(0xFFFFFFFF, lfu_count_ & kScaleMask);
}
__device__ __forceinline__ int64_t LfuCountSync() const
{
int const scale = ScaleSync();
int64_t count = (lfu_count_ & kCountMask);
count <<= scale;
count += (1ULL << scale) - 1;
return count;
}
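  // Worked example: if the reassembled scale is 3 and this line's stored count is 5,
  // LfuCountSync() returns (5 << 3) + (1 << 3) - 1 = 47, i.e. the upper estimate of the original
  // counter that was divided by 2^3 when stored.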
/**
   * Check if local_id is cached in this cache set
   * @param local_id : local id to look up
   * @return : cache line index if the key is cached, else -1.
*/
__device__ __forceinline__ int KeyIndexSync(int local_id) const
{
bool is_key_in_cache_line = IsValid() && LocalID() == local_id;
uint32_t mask = __ballot_sync(0xFFFFFFFF, static_cast<int>(is_key_in_cache_line));
// __ffs(0) returns 0
return __ffs(mask) - 1;
}
/**
   * Set a new LFU counter for this cache line; pass -1 for invalid cache lines.
* @param new_lfu_count : new LFU count
*/
__device__ __forceinline__ void SetScaleLfuCountSync(int64_t new_lfu_count)
{
int scale = (new_lfu_count >= 0) ? 64 - __clzll(new_lfu_count) : 0;
scale = max(scale, kScaledCounterBits) - kScaledCounterBits;
int max_scale = WarpFindMaxScaleSync(scale);
// printf("threadIdx.x=%d, new_lfu_count=%ld, scale=%d, max_scale=%d\n", threadIdx.x,
// new_lfu_count, scale, max_scale);
int scale_lfu_count = new_lfu_count >> max_scale;
scale_lfu_count |= ((max_scale >> threadIdx.x) & 1) << kScaledCounterBits;
lfu_count_ = scale_lfu_count;
}
__device__ __forceinline__ void SetLocalID(int local_id)
{
if (local_id >= 0) {
if (IsInValid() || local_id != LocalID()) ClearModify();
tag_ &= ~(kLocalIDMask | kValidMask);
tag_ |= (local_id | kValidMask);
} else {
tag_ = 0;
}
}
__device__ __forceinline__ void SetModified(int local_id)
{
if (local_id >= 0 && local_id == LocalID()) { tag_ |= kModifiedMask; }
}
__device__ __forceinline__ void ClearCacheLine() { tag_ = 0; }
__device__ __forceinline__ void ClearModify() { tag_ &= ~kModifiedMask; }
static constexpr int kCacheSetSize = 32;
static constexpr int kLocalIDBits = 14;
static constexpr int kScaledCounterBits = 14;
static constexpr uint32_t kValidMask = (1U << 14);
static constexpr uint32_t kModifiedMask = (1U << 15);
static constexpr uint32_t kLocalIDMask = (1U << 14) - 1;
static constexpr uint32_t kCountMask = (1U << 14) - 1;
static constexpr uint32_t kScaleMask = (1U << 14);
uint32_t tag_;
uint32_t lfu_count_;
};
template <typename NodeIDT>
class CacheSetUpdater {
public:
static constexpr int kTopKRegisterCount = 4;
static constexpr int kCacheSetSize = CacheLineInfo::kCacheSetSize;
static constexpr int kScaledCounterBits = 14;
private:
using warp_bq_t =
raft::matrix::detail::select::warpsort::warp_sort_immediate<kCacheSetSize, false, int64_t, int>;
static constexpr int WARP_SIZE = 32;
static constexpr int BLOCK_SIZE = kCacheSetSize;
  static_assert(kCacheSetSize == WARP_SIZE, "only support kCacheSetSize == 32 and BLOCK_SIZE == 32");
public:
struct TempStorage {
int64_t store_keys[kCacheSetSize];
int store_values[kCacheSetSize];
};
/**
* From all invalid CacheSet, recompute lids to cache, and update cache_line_info.
* NOTE: data are not loaded, need to load after this function
* @param temp_storage : temp_storage
* @param cache_line_info : cache_line_info, will be updated.
* @param memory_lfu_counter : lfu_counter in memory of this cache set
   * @param id_count : valid id count in this cache set; in most cases it equals cache_set_coverage,
   * but may be smaller for the trailing cache set.
*/
__device__ __forceinline__ void ReComputeCache(TempStorage& temp_storage,
CacheLineInfo& cache_line_info,
int64_t* memory_lfu_counter,
int id_count)
{
if (id_count <= 0) return;
assert(cache_line_info.IsInValid());
// int base_idx = 0;
// int valid_count = 0;
FillCandidate<false>(nullptr, nullptr, memory_lfu_counter, 0, id_count, temp_storage, -1);
cache_line_info.ClearCacheLine();
cache_line_info.SetLocalID(candidate_local_id_);
cache_line_info.SetScaleLfuCountSync(candidate_local_id_ >= 0 ? candidate_lfu_count_ : 0);
}
/**
* Update cache set according to gids and inc_count
   * @tparam NeedOutputLoadIDs : whether to output IDs that should be loaded into the cache
   * @tparam NeedOutputWriteBackIDs : whether to output IDs that should be written back to memory
* @param temp_storage : Work space storage
* @param cache_line_info : cache line info that already loaded, will be updated
* @param memory_lfu_counter : counter pointer of IDs that current cache set covers.
* @param gids : GIDs to update
* @param inc_count : same length as gids, the count of each GIDs to added, if nullptr, each GID
* add 1.
* @param need_load_to_cache_ids : output of IDs that should be loaded into cache, if not needed,
* use nullptr
   * @param need_write_back_ids : output of IDs that should be written back to memory, if not needed,
* use nullptr
* @param set_start_id : start GID of current cache set
* @param id_count : count of GIDs
*/
template <bool NeedOutputLoadIDs, bool NeedOutputWriteBackIDs>
__device__ __forceinline__ void UpdateCache(TempStorage& temp_storage,
CacheLineInfo& cache_line_info,
int64_t* memory_lfu_counter,
const NodeIDT* gids,
const int* inc_count,
NodeIDT* need_load_to_cache_ids,
NodeIDT* need_write_back_ids,
int64_t set_start_id,
int id_count)
{
if (id_count <= 0) return;
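    // High-level flow (summary of the code below): build a warp-wide top-32 of
    // (lfu_count, local_id) candidates from the incoming gids (incrementing their in-memory
    // counters) plus the currently cached key of this lane (when it is not in the update list),
    // then match the winners against the currently cached keys to decide which cache lines keep
    // their data, which ids must be loaded (need_load_to_cache_ids) and which dirty lines must be
    // written back first (need_write_back_ids), and finally store the new tag and scaled counter.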
candidate_lfu_count_ = -1;
candidate_local_id_ = -1;
int cached_local_id = cache_line_info.LocalID();
int has_local_id_count = FillCandidate(
gids, inc_count, memory_lfu_counter, set_start_id, id_count, temp_storage, cached_local_id);
// printf("[TopK init dump] threadIdx.x=%d, lfu_count=%ld, lid=%d, has_local_id_count = %d \n",
// threadIdx.x,
// candidate_lfu_count_,
// candidate_local_id_,
// has_local_id_count);
int64_t candidate_lfu_count0 = -1;
int candidate_local_id0 = -1;
unsigned int match_flag;
// match_flag = WarpMatchLocalIDPairSync(candidate_local_id_[0], cached_local_id);
int64_t estimated_lfu_count = cache_line_info.LfuCountSync();
// Valid AND NOT exist in update list
if (cached_local_id != -1 && has_local_id_count == 0) {
// cached key not updated, use estimated lfu_count from cache
candidate_lfu_count0 = estimated_lfu_count;
candidate_local_id0 = cached_local_id;
}
warp_bq_t warp_queue(kCacheSetSize);
warp_queue.add(candidate_lfu_count_, candidate_local_id_);
warp_queue.add(candidate_lfu_count0, candidate_local_id0);
warp_queue.done();
warp_queue.store(temp_storage.store_keys, temp_storage.store_values);
__syncthreads();
if (threadIdx.x < kCacheSetSize) {
candidate_lfu_count_ = temp_storage.store_keys[threadIdx.x];
candidate_local_id_ = temp_storage.store_values[threadIdx.x];
}
// printf("[TopK merge dump] threadIdx.x=%d, lfu_count=%ld, lid=%d\n", threadIdx.x,
// candidate_lfu_count_[0], candidate_local_id_[0]);
match_flag = WarpMatchLocalIDPairSync(candidate_local_id_, cached_local_id);
int from_lane = -1;
bool has_match = (cached_local_id >= 0 && match_flag != 0);
if (has_match) from_lane = __ffs(match_flag) - 1;
unsigned int can_update_mask = __ballot_sync(0xFFFFFFFF, !has_match);
unsigned int lower_thread_mask = (1U << threadIdx.x) - 1;
int updatable_cache_line_rank = !has_match ? __popc(can_update_mask & lower_thread_mask) : -1;
unsigned int new_match_flag = WarpMatchLocalIDPairSync(cached_local_id, candidate_local_id_);
// printf("tid=%d, cached_local_id=%d, candidate_local_id_=%d, new_match_flag=%x\n",
// threadIdx.x,
// cached_local_id,
// candidate_local_id_,
// new_match_flag);
bool new_need_slot = (candidate_local_id_ >= 0 && new_match_flag == 0);
unsigned int need_new_slot_mask = __ballot_sync(0xFFFFFFFF, new_need_slot);
int insert_data_rank = new_need_slot ? __popc(need_new_slot_mask & lower_thread_mask) : -1;
// printf("tid=%d, updatable_cache_line_rank=%d, insert_data_rank=%d\n", threadIdx.x,
// updatable_cache_line_rank, insert_data_rank);
unsigned int rank_match_flag =
WarpMatchLocalIDPairSync(insert_data_rank, updatable_cache_line_rank);
if (updatable_cache_line_rank != -1 && rank_match_flag != 0) {
from_lane = __ffs(rank_match_flag) - 1;
}
int src_lane_idx = from_lane >= 0 ? from_lane : 0;
int64_t new_lfu_count = __shfl_sync(0xFFFFFFFF, candidate_lfu_count_, src_lane_idx, 32);
int new_local_id = __shfl_sync(0xFFFFFFFF, candidate_local_id_, src_lane_idx, 32);
if (from_lane == -1) {
new_local_id = -1;
new_lfu_count = 0;
}
// printf("tid=%d, new_local_id=%d, new_lfu_count=%ld\n", threadIdx.x, new_local_id,
// new_lfu_count);
if (NeedOutputLoadIDs && need_load_to_cache_ids != nullptr) {
int new_cached_lid = -1;
if (new_need_slot) { new_cached_lid = candidate_local_id_; }
unsigned int load_cache_mask = __ballot_sync(0xFFFFFFFF, new_cached_lid >= 0);
int output_idx = __popc(load_cache_mask & ((1 << threadIdx.x) - 1));
int total_load_count = __popc(load_cache_mask);
if (new_need_slot) {
need_load_to_cache_ids[output_idx] = new_cached_lid + set_start_id;
// printf("tid=%d, load_cache_mask=%x, NeedLoadGIDs[%d]=%ld\n", threadIdx.x,
// load_cache_mask, output_idx, new_cached_lid + set_start_id);
}
if (threadIdx.x >= total_load_count && threadIdx.x < min(id_count, kCacheSetSize)) {
need_load_to_cache_ids[threadIdx.x] = -1;
}
}
if (NeedOutputWriteBackIDs && need_write_back_ids != nullptr) {
int write_back_lid = -1;
bool need_write_back = cached_local_id >= 0 && !has_match && cache_line_info.IsModified();
if (need_write_back) { write_back_lid = cache_line_info.LocalID(); }
unsigned int write_back_mask = __ballot_sync(0xFFFFFFFF, write_back_lid >= 0);
int output_idx = __popc(write_back_mask & ((1 << threadIdx.x) - 1));
if (need_write_back) {
need_write_back_ids[output_idx] = write_back_lid + set_start_id;
// printf("tid=%d, WriteBackGIDs[%d]=%ld\n", threadIdx.x, output_idx, write_back_lid +
// set_start_id);
}
int total_write_back_count = __popc(write_back_mask);
if (threadIdx.x >= total_write_back_count && threadIdx.x < min(id_count, kCacheSetSize)) {
need_write_back_ids[threadIdx.x] = -1;
}
}
cache_line_info.SetScaleLfuCountSync(new_lfu_count);
cache_line_info.SetLocalID(new_local_id);
}
private:
int64_t candidate_lfu_count_;
int candidate_local_id_;
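  // FillCandidate: feeds (lfu_count, local_id) pairs for all covered ids (or only the updated
  // gids) into a warp-level top-32 queue and leaves the per-lane winners in candidate_lfu_count_ /
  // candidate_local_id_. The loop bound id_count + raft::laneId() keeps every lane iterating the
  // same number of times so the warp-synchronous queue operations stay aligned. Returns how many
  // of the processed ids match cached_local_id.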
template <bool IncCounter = true>
__device__ __forceinline__ int FillCandidate(const NodeIDT* gids,
const int* inc_freq_count,
int64_t* cache_set_coverage_counter,
int64_t cache_set_start_id,
int id_count,
TempStorage& temp_storage,
int cached_local_id)
{
warp_bq_t warp_queue(kCacheSetSize);
const int per_thread_lim = id_count + raft::laneId();
int has_local_id_count = 0;
for (int idx = threadIdx.x; idx < per_thread_lim; idx += BLOCK_SIZE) {
int local_id = -1;
int64_t candidate_lfu_count = -1;
int candidate_local_id = -1;
if (idx < id_count) {
local_id = gids != nullptr ? gids[idx] - cache_set_start_id : idx;
candidate_lfu_count = cache_set_coverage_counter[local_id];
if (IncCounter) {
int id_inc_count = inc_freq_count != nullptr ? inc_freq_count[idx] : 1;
candidate_lfu_count += id_inc_count;
cache_set_coverage_counter[local_id] = candidate_lfu_count;
}
candidate_local_id = local_id;
}
unsigned int local_id_match_mask = WarpMatchLocalIDPairSync(local_id, cached_local_id);
has_local_id_count += ((cached_local_id != -1) ? __popc(local_id_match_mask) : 0);
warp_queue.add(candidate_lfu_count, candidate_local_id);
}
warp_queue.done();
warp_queue.store(temp_storage.store_keys, temp_storage.store_values);
__syncthreads();
if (threadIdx.x < kCacheSetSize) {
candidate_lfu_count_ = temp_storage.store_keys[threadIdx.x];
candidate_local_id_ = temp_storage.store_values[threadIdx.x];
}
__syncthreads();
return has_local_id_count;
}
};
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_cached_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <stdint.h>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
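// gather_cached_func: cache-aware gather. For every index it first consults the cache line tags;
// rows resident in cached_embedding_gref are read from the cache, all other rows are read from the
// padded raw embedding (padded_embedding_gref), so the output is always fully populated.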
wholememory_error_code_t gather_cached_func(wholememory_gref_t padded_embedding_gref,
wholememory_tensor_description_t* embedding_desc,
wholememory_gref_t cached_embedding_gref,
wholememory_tensor_description_t* cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* indices,
wholememory_tensor_description_t* indices_desc,
void* output,
wholememory_tensor_description_t* output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
int64_t raw_start_gid,
cudaStream_t stream);
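// try_gather_cached_func: gathers only the rows that hit the cache. For position i, hit_indices[i]
// is set to the gid on a hit (and miss_indices[i] to -1), or the other way round on a miss; missed
// rows are not written to output and are expected to be fetched by the caller.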
wholememory_error_code_t try_gather_cached_func(
wholememory_gref_t cached_embedding_gref,
wholememory_tensor_description_t* cached_embedding_desc,
wholememory_gref_t cache_line_tag_gref,
void* indices,
wholememory_tensor_description_t* indices_desc,
void* hit_indices,
void* miss_indices,
void* output,
wholememory_tensor_description_t* output_desc,
int cache_set_coverage,
int64_t cache_start_gid,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/gather_func_impl_integer_data_int32_indices.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gather_scatter_func.cuh"
#include <wholememory/wholememory.h>
#include "logger.hpp"
#include "wholememory_ops/register.hpp"
namespace wholememory_ops {
template <typename EmbeddingT, typename OutputT>
void gather_integer_int32_temp_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
int64_t indice_count,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
gather_temp_func<EmbeddingT, int32_t, OutputT>(
embedding_gref, embedding_desc, indices, indice_count, output, output_desc, stream);
}
REGISTER_DISPATCH_TWO_TYPES(GatherFuncIntegerInt32,
gather_integer_int32_temp_func,
ALLSINT,
ALLSINT)
wholememory_error_code_t gather_integer_int32_func(wholememory_gref_t embedding_gref,
wholememory_matrix_description_t embedding_desc,
void* indices,
wholememory_array_description_t indices_desc,
void* output,
wholememory_matrix_description_t output_desc,
cudaStream_t stream)
{
try {
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(embedding_desc.dtype));
WHOLEMEMORY_CHECK(wholememory_dtype_is_integer_number(output_desc.dtype));
WHOLEMEMORY_CHECK(indices_desc.dtype == WHOLEMEMORY_DT_INT);
DISPATCH_TWO_TYPES(
embedding_desc.dtype,
output_desc.dtype,
GatherFuncIntegerInt32,
embedding_gref,
embedding_desc,
static_cast<char*>(indices) +
indices_desc.storage_offset * wholememory_dtype_get_element_size(indices_desc.dtype),
indices_desc.size,
output,
output_desc,
stream);
} catch (const wholememory::cuda_error& wle) {
WHOLEMEMORY_ERROR("gather CUDA LOGIC Error %s\n", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_ERROR("gather LOGIC Error %s\n", le.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (...) {
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory_ops/functions/bucket_ids_func.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
namespace wholememory_ops {
wholememory_error_code_t bucket_ids_for_ranks(void* indices,
wholememory_array_description_t indice_desc,
int64_t* dev_rank_id_count_ptr,
size_t embedding_entry_count_per_rank,
int world_size,
cudaDeviceProp* prop,
cudaStream_t stream);
} // namespace wholememory_ops
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/file_io.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/wholememory.h>
namespace wholememory {
wholememory_error_code_t load_file_to_handle(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t entry_size,
const char** file_names,
int file_count) noexcept;
wholememory_error_code_t store_handle_to_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t entry_size,
const char* local_file_name) noexcept;
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/nccl_comms.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wholememory/nccl_comms.hpp"
#include <cuda_runtime.h>
#include <nccl.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <exception>
#include <memory>
#include <thread>
#include <raft/core/comms.hpp>
#include "cuda_macros.hpp"
#include <cstring>
#include <raft/comms/detail/util.hpp>
#include <raft/core/error.hpp>
namespace wholememory {
nccl_comms::nccl_comms(ncclComm_t nccl_comm, int num_ranks, int rank, cudaStream_t rmm_stream)
: nccl_comm_(nccl_comm), rmm_stream_(rmm_stream), num_ranks_(num_ranks), rank_(rank)
{
initialize();
};
void nccl_comms::initialize()
{
WM_CUDA_CHECK(cudaMallocHost(&host_send_buffer_, HOST_BUFFER_SIZE_PER_RANK * num_ranks_));
WM_CUDA_CHECK(cudaMallocHost(&host_recv_buffer_, HOST_BUFFER_SIZE_PER_RANK * num_ranks_));
WM_CUDA_CHECK(cudaMalloc(&buf_, sizeof(int)));
}
nccl_comms::~nccl_comms()
{
WM_CUDA_CHECK_NO_THROW(cudaFreeHost(host_send_buffer_));
WM_CUDA_CHECK_NO_THROW(cudaFreeHost(host_recv_buffer_));
WM_CUDA_CHECK_NO_THROW(cudaFree(buf_));
}
static size_t get_nccl_datatype_size(ncclDataType_t datatype)
{
switch (datatype) {
case ncclInt8: return 1;
case ncclUint8: return 1;
case ncclInt32: return 4;
case ncclUint32: return 4;
case ncclInt64: return 8;
case ncclUint64: return 8;
case ncclFloat16: return 2;
case ncclFloat32: return 4;
case ncclFloat64: return 8;
#if defined(__CUDA_BF16_TYPES_EXIST__)
case ncclBfloat16: return 2;
#endif
    default: WHOLEMEMORY_FAIL("get_nccl_datatype_size: unsupported ncclDataType_t");
  }
  return 0;  // not reached; this function returns a size in bytes, not an error code
}
void nccl_comms::barrier() const
{
allreduce(buf_, buf_, 1, ncclInt32, ncclSum, rmm_stream_);
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
}
void nccl_comms::allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclAllReduce(sendbuff, recvbuff, count, datatype, op, nccl_comm_, stream));
}
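// The host_* variants below stage host memory through the pinned host_send_buffer_ /
// host_recv_buffer_ (allocated in initialize()) in chunks bounded by HOST_BUFFER_SIZE_PER_RANK,
// run the corresponding NCCL collective on rmm_stream_, synchronize, and copy the result back,
// so callers can pass plain pageable host pointers.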
void nccl_comms::host_allreduce(
const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype, ncclRedOp_t op) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK * num_ranks_ / datatype_size;
for (size_t offset = 0; offset < count; offset += max_elt_count) {
size_t elt_count = (count - offset > max_elt_count) ? max_elt_count : count - offset;
std::memcpy(host_send_buffer_,
static_cast<const char*>(sendbuff) + datatype_size * offset,
elt_count * datatype_size);
RAFT_NCCL_TRY(ncclAllReduce(
host_send_buffer_, host_recv_buffer_, elt_count, datatype, op, nccl_comm_, rmm_stream_));
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
std::memcpy(static_cast<char*>(recvbuff) + datatype_size * offset,
host_recv_buffer_,
elt_count * datatype_size);
}
}
void nccl_comms::bcast(
void* buff, size_t count, ncclDataType_t datatype, int root, cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclBroadcast(buff, buff, count, datatype, root, nccl_comm_, stream));
}
void nccl_comms::bcast(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclBroadcast(sendbuff, recvbuff, count, datatype, root, nccl_comm_, stream));
}
void nccl_comms::host_bcast(
const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype, int root) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK * num_ranks_ / datatype_size;
for (size_t offset = 0; offset < count; offset += max_elt_count) {
size_t elt_count = (count - offset > max_elt_count) ? max_elt_count : count - offset;
std::memcpy(host_send_buffer_,
static_cast<const char*>(sendbuff) + datatype_size * offset,
elt_count * datatype_size);
RAFT_NCCL_TRY(ncclBroadcast(
host_send_buffer_, host_recv_buffer_, elt_count, datatype, root, nccl_comm_, rmm_stream_));
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
std::memcpy(static_cast<char*>(recvbuff) + datatype_size * offset,
host_recv_buffer_,
elt_count * datatype_size);
}
}
void nccl_comms::host_bcast(void* buff, size_t count, ncclDataType_t datatype, int root) const
{
host_bcast(buff, buff, count, datatype, root);
}
void nccl_comms::reduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
int root,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclReduce(sendbuff, recvbuff, count, datatype, op, root, nccl_comm_, stream));
}
void nccl_comms::host_reduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
int root) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK * num_ranks_ / datatype_size;
for (size_t offset = 0; offset < count; offset += max_elt_count) {
size_t elt_count = (count - offset > max_elt_count) ? max_elt_count : count - offset;
std::memcpy(host_send_buffer_,
static_cast<const char*>(sendbuff) + datatype_size * offset,
elt_count * datatype_size);
RAFT_NCCL_TRY(ncclReduce(host_send_buffer_,
host_recv_buffer_,
elt_count,
datatype,
op,
root,
nccl_comm_,
rmm_stream_));
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
if (get_rank() == root) {
std::memcpy(static_cast<char*>(recvbuff) + datatype_size * offset,
host_recv_buffer_,
elt_count * datatype_size);
}
}
}
void nccl_comms::allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclAllGather(sendbuff, recvbuff, sendcount, datatype, nccl_comm_, stream));
}
void nccl_comms::host_allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK / datatype_size;
for (size_t offset = 0; offset < sendcount; offset += max_elt_count) {
size_t elt_count = (sendcount - offset > max_elt_count) ? max_elt_count : sendcount - offset;
std::memcpy(host_send_buffer_,
static_cast<const char*>(sendbuff) + datatype_size * offset,
elt_count * datatype_size);
    RAFT_NCCL_TRY(ncclAllGather(
      host_send_buffer_, host_recv_buffer_, elt_count, datatype, nccl_comm_, rmm_stream_));
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
for (int i = 0; i < get_size(); i++) {
std::memcpy(
static_cast<char*>(recvbuff) + datatype_size * offset + i * sendcount * datatype_size,
host_recv_buffer_ + i * elt_count * datatype_size,
elt_count * datatype_size);
}
}
}
void nccl_comms::allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype,
cudaStream_t stream) const
{
// From: "An Empirical Evaluation of Allgatherv on Multi-GPU Systems" -
// https://arxiv.org/pdf/1812.05964.pdf Listing 1 on page 4.
WHOLEMEMORY_EXPECTS(
num_ranks_ <= 2048,
"# NCCL operations between ncclGroupStart & ncclGroupEnd shouldn't exceed 2048.");
RAFT_NCCL_TRY(ncclGroupStart());
for (int root = 0; root < num_ranks_; ++root) {
size_t dtype_size = get_nccl_datatype_size(datatype);
RAFT_NCCL_TRY(ncclBroadcast(sendbuf,
static_cast<char*>(recvbuf) + displs[root] * dtype_size,
recvcounts[root],
datatype,
root,
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
void nccl_comms::host_allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype) const
{
size_t dtype_size = get_nccl_datatype_size(datatype);
for (int root = 0; root < num_ranks_; ++root) {
host_bcast(sendbuf,
static_cast<char*>(recvbuf) + displs[root] * dtype_size,
recvcounts[root],
datatype,
root);
}
}
void nccl_comms::gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const
{
size_t dtype_size = get_nccl_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
if (get_rank() == root) {
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + sendcount * r * dtype_size,
sendcount,
datatype,
r,
nccl_comm_,
stream));
}
}
RAFT_NCCL_TRY(ncclSend(sendbuff, sendcount, datatype, root, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclGroupEnd());
}
void nccl_comms::host_gather(
const void* sendbuff, void* recvbuff, size_t sendcount, ncclDataType_t datatype, int root) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK / datatype_size;
for (size_t offset = 0; offset < sendcount; offset += max_elt_count) {
size_t elt_count = (sendcount - offset > max_elt_count) ? max_elt_count : sendcount - offset;
std::memcpy(host_send_buffer_,
static_cast<const char*>(sendbuff) + datatype_size * offset,
elt_count * datatype_size);
    // gather only the current chunk (elt_count elements) to match the per-rank unpacking below
    gather(host_send_buffer_, host_recv_buffer_, elt_count, datatype, root, rmm_stream_);
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
if (rank_ == root) {
for (int i = 0; i < num_ranks_; i++) {
std::memcpy(
static_cast<char*>(recvbuff) + datatype_size * offset + i * sendcount * datatype_size,
host_recv_buffer_ + i * elt_count * datatype_size,
elt_count * datatype_size);
}
}
}
}
void nccl_comms::gatherv(const void* sendbuff,
void* recvbuff,
size_t sendcount,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const
{
size_t dtype_size = get_nccl_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
if (get_rank() == root) {
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + displs[r] * dtype_size,
recvcounts[r],
datatype,
r,
nccl_comm_,
stream));
}
}
RAFT_NCCL_TRY(ncclSend(sendbuff, sendcount, datatype, root, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclGroupEnd());
}
void nccl_comms::reducescatter(const void* sendbuff,
void* recvbuff,
size_t recvcount,
ncclDataType_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclReduceScatter(sendbuff, recvbuff, recvcount, datatype, op, nccl_comm_, stream));
}
void nccl_comms::alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
cudaStream_t stream) const
{
size_t dtype_size = get_nccl_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + sendcount * r * dtype_size,
sendcount,
datatype,
r,
nccl_comm_,
stream));
}
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclSend(static_cast<const char*>(sendbuff) + sendcount * r * dtype_size,
sendcount,
datatype,
r,
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
void nccl_comms::host_alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype) const
{
const size_t datatype_size = get_nccl_datatype_size(datatype);
const size_t max_elt_count = HOST_BUFFER_SIZE_PER_RANK / datatype_size;
for (size_t offset = 0; offset < sendcount; offset += max_elt_count) {
size_t elt_count = (sendcount - offset > max_elt_count) ? max_elt_count : sendcount - offset;
for (int i = 0; i < num_ranks_; i++) {
std::memcpy(
host_send_buffer_ + i * elt_count * datatype_size,
static_cast<const char*>(sendbuff) + datatype_size * offset + i * sendcount * datatype_size,
elt_count * datatype_size);
}
    // exchange only the current chunk (elt_count elements) to match the per-rank packing above
    alltoall(host_send_buffer_, host_recv_buffer_, elt_count, datatype, rmm_stream_);
WM_CUDA_CHECK(cudaStreamSynchronize(rmm_stream_));
for (int i = 0; i < num_ranks_; i++) {
std::memcpy(
static_cast<char*>(recvbuff) + datatype_size * offset + i * sendcount * datatype_size,
host_recv_buffer_ + i * elt_count * datatype_size,
elt_count * datatype_size);
}
}
}
void nccl_comms::alltoallv(const void* sendbuff,
void* recvbuff,
const size_t* sendcounts,
const size_t* senddispls,
const size_t* recvcounts,
const size_t* recvdispls,
ncclDataType_t datatype,
cudaStream_t stream) const
{
size_t dtype_size = get_nccl_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + recvdispls[r] * dtype_size,
recvcounts[r],
datatype,
r,
nccl_comm_,
stream));
}
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclSend(static_cast<const char*>(sendbuff) + senddispls[r] * dtype_size,
sendcounts[r],
datatype,
r,
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
wholememory_error_code_t nccl_comms::sync_stream(cudaStream_t stream) const
{
if (raft::comms::detail::nccl_sync_stream(nccl_comm_, stream) != raft::comms::status_t::SUCCESS) {
return WHOLEMEMORY_COMMUNICATION_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t nccl_comms::sync_stream() const { return sync_stream(rmm_stream_); }
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void nccl_comms::device_send(const void* send_buf,
size_t send_size,
int dest,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclSend(send_buf, send_size, ncclUint8, dest, nccl_comm_, stream));
}
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void nccl_comms::device_recv(void* recv_buf,
size_t recv_size,
int source,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclRecv(recv_buf, recv_size, ncclUint8, source, nccl_comm_, stream));
}
void nccl_comms::device_sendrecv(const void* sendbuf,
size_t sendsize,
int dest,
void* recvbuf,
size_t recvsize,
int source,
cudaStream_t stream) const
{
// ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
RAFT_NCCL_TRY(ncclGroupStart());
RAFT_NCCL_TRY(ncclSend(sendbuf, sendsize, ncclUint8, dest, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclRecv(recvbuf, recvsize, ncclUint8, source, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclGroupEnd());
}
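// Illustrative usage sketch (assumption, not library code): when one thread both sends to and
// receives from peers, issuing device_send() and device_recv() separately can deadlock because
// both sides may block in the send. device_sendrecv() wraps the pair in a single NCCL group:
//
//   int peer = (rank + 1) % world_size;   // hypothetical ring-style exchange
//   comm->device_sendrecv(send_buf, nbytes, peer, recv_buf, nbytes, peer, stream);
//
// which is exactly the ncclGroupStart()/ncclGroupEnd() pattern implemented above.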
void nccl_comms::device_multicast_sendrecv(const void* sendbuf,
std::vector<size_t> const& sendsizes,
std::vector<size_t> const& sendoffsets,
std::vector<int> const& dests,
void* recvbuf,
std::vector<size_t> const& recvsizes,
std::vector<size_t> const& recvoffsets,
std::vector<int> const& sources,
cudaStream_t stream) const
{
// ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
RAFT_NCCL_TRY(ncclGroupStart());
for (size_t i = 0; i < sendsizes.size(); ++i) {
RAFT_NCCL_TRY(ncclSend(static_cast<const char*>(sendbuf) + sendoffsets[i],
sendsizes[i],
ncclUint8,
dests[i],
nccl_comm_,
stream));
}
for (size_t i = 0; i < recvsizes.size(); ++i) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuf) + recvoffsets[i],
recvsizes[i],
ncclUint8,
sources[i],
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
void nccl_comms::group_start() const { RAFT_NCCL_TRY(ncclGroupStart()); }
void nccl_comms::group_end() const { RAFT_NCCL_TRY(ncclGroupEnd()); }
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/communicator.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <cuda_runtime_api.h>
#include <nccl.h>
#include <cstring>
#include <map>
#include <mutex>
#include <vector>
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#include "cuda_macros.hpp"
namespace wholememory {
class nccl_comms;
}
struct wholememory_comm_ {
wholememory_comm_(ncclComm_t nccl_comm, int num_ranks, int rank, cudaStream_t stream);
~wholememory_comm_();
void barrier() const;
void allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const;
void host_allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
ncclRedOp_t op) const;
void bcast(
void* buff, size_t count, wholememory_dtype_t datatype, int root, cudaStream_t stream) const;
void bcast(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
int root,
cudaStream_t stream) const;
void host_bcast(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
int root) const;
void host_bcast(void* buff, size_t count, wholememory_dtype_t datatype, int root) const;
void reduce(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
ncclRedOp_t op,
int root,
cudaStream_t stream) const;
void host_reduce(const void* sendbuff,
void* recvbuff,
size_t count,
wholememory_dtype_t datatype,
ncclRedOp_t op,
int root) const;
void allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype,
cudaStream_t stream) const;
void host_allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype) const;
void allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
wholememory_dtype_t datatype,
cudaStream_t stream) const;
void host_allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
wholememory_dtype_t datatype) const;
void gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype,
int root,
cudaStream_t stream) const;
void host_gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype,
int root) const;
void gatherv(const void* sendbuff,
void* recvbuff,
size_t sendcount,
const size_t* recvcounts,
const size_t* displs,
wholememory_dtype_t datatype,
int root,
cudaStream_t stream) const;
void reducescatter(const void* sendbuff,
void* recvbuff,
size_t recvcount,
wholememory_dtype_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const;
void alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype,
cudaStream_t stream) const;
void host_alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
wholememory_dtype_t datatype) const;
void alltoallv(const void* sendbuff,
void* recvbuff,
const size_t* sendcounts,
const size_t* senddispls,
const size_t* recvcounts,
const size_t* recvdispls,
wholememory_dtype_t datatype,
cudaStream_t stream) const;
wholememory_error_code_t sync_stream(cudaStream_t stream) const;
wholememory_error_code_t sync_stream() const;
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_send(const void* send_buf, size_t send_size, int dest, cudaStream_t stream) const;
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_recv(void* recv_buf, size_t recv_size, int source, cudaStream_t stream) const;
void device_sendrecv(const void* sendbuf,
size_t sendsize,
int dest,
void* recvbuf,
size_t recvsize,
int source,
cudaStream_t stream) const;
void device_multicast_sendrecv(const void* sendbuf,
std::vector<size_t> const& sendsizes,
std::vector<size_t> const& sendoffsets,
std::vector<int> const& dests,
void* recvbuf,
std::vector<size_t> const& recvsizes,
std::vector<size_t> const& recvoffsets,
std::vector<int> const& sources,
cudaStream_t stream) const;
bool is_intranode() const;
bool support_type_location(wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) const;
void group_start() const;
void group_end() const;
wholememory::nccl_comms* raft_nccl_comm;
cudaStream_t comm_stream = nullptr;
cudaEvent_t cuda_event = nullptr;
ncclComm_t raw_nccl_comm = nullptr;
int world_rank = 0;
int world_size = 1;
int intra_node_first_rank = -1;
int intra_node_rank = -1;
int intra_node_rank_num = 0;
int intra_node_first_rank_pid = -1;
int comm_id = -1;
int dev_id = -1;
int local_gpu_ids[16] = {0};
size_t alloc_granularity = 2 * 1024 * 1024UL;
std::mutex mu;
std::map<int, wholememory_handle_t> wholememory_map;
} __attribute__((aligned(64)));
template <typename TypeT>
inline bool wm_comm_check_all_same(wholememory_comm_t comm, const TypeT& t)
{
std::unique_ptr<TypeT[]> t_array(new TypeT[comm->world_size]());
comm->host_allgather(&t, t_array.get(), sizeof(TypeT), WHOLEMEMORY_DT_INT8);
for (int r = 0; r < comm->world_size; r++) {
if (t_array.get()[r] != t) return false;
}
return true;
}
template <>
inline bool wm_comm_check_all_same(wholememory_comm_t comm, const std::string& str)
{
size_t str_len = str.size();
if (!wm_comm_check_all_same(comm, str_len)) return false;
std::string cat_str;
cat_str.resize(str_len * comm->world_size, '\0');
comm->host_allgather(
str.data(), const_cast<char*>(cat_str.c_str()), str_len, WHOLEMEMORY_DT_INT8);
for (int r = 0; r < comm->world_size; r++) {
if (std::strncmp(str.data(), cat_str.data() + r * str_len, str_len) != 0) return false;
}
return true;
}
#define WM_COMM_CHECK_ALL_SAME(comm, data) \
do { \
if (!wm_comm_check_all_same(comm, data)) { WHOLEMEMORY_FATAL("COMM_CHECK_ALL_SAME failed."); } \
} while (0)
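// Illustrative usage (assumption, not taken from this header): the macro is intended to verify
// that every rank passed identical parameters before a collective allocation, e.g.
//
//   WM_COMM_CHECK_ALL_SAME(comm, total_size);
//   WM_COMM_CHECK_ALL_SAME(comm, memory_type);
//
// wm_comm_check_all_same() all-gathers the value as raw bytes and compares every rank's copy with
// the local one; the std::string specialization first checks that all lengths match.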
namespace wholememory {
wholememory_error_code_t create_unique_id(wholememory_unique_id_t* unique_id) noexcept;
wholememory_error_code_t create_communicator(wholememory_comm_t* comm,
wholememory_unique_id_t unique_id,
int rank,
int size) noexcept;
wholememory_error_code_t destroy_communicator_locked(wholememory_comm_t comm) noexcept;
wholememory_error_code_t destroy_communicator(wholememory_comm_t comm) noexcept;
wholememory_error_code_t communicator_support_type_location(
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept;
wholememory_error_code_t destroy_all_communicators() noexcept;
wholememory_error_code_t communicator_get_rank(int* rank, wholememory_comm_t comm) noexcept;
wholememory_error_code_t communicator_get_size(int* size, wholememory_comm_t comm) noexcept;
void communicator_barrier(wholememory_comm_t comm);
bool is_intranode_communicator(wholememory_comm_t comm) noexcept;
std::string get_temporary_directory_path(wholememory_comm_t comm);
std::string get_shm_prefix(wholememory_comm_t comm);
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/memory_handle.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "memory_handle.hpp"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <fcntl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <mutex>
#include <vector>
#include "cuda_macros.hpp"
#include "error.hpp"
#include "integer_utils.hpp"
#include "logger.hpp"
#include "system_info.hpp"
namespace wholememory {
enum wm_memory_op : int32_t {
WM_MEM_OP_CREATE = 0xEEEEE,
WM_MEM_OP_EXCHANGE_ID,
WM_MEM_OP_DESTROY,
};
class wholememory_impl {
public:
wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: handle_(wholememory_handle),
comm_(comm),
type_(memory_type),
location_(memory_location),
total_size_(total_size),
data_granularity_(data_granularity)
{
}
wholememory_impl() = delete;
wholememory_impl(const wholememory_impl&) = delete;
wholememory_impl(const wholememory_impl&&) = delete;
virtual ~wholememory_impl() = default;
[[nodiscard]] wholememory_memory_type_t get_type() const { return type_; }
[[nodiscard]] wholememory_memory_location_t get_location() const { return location_; }
[[nodiscard]] wholememory_comm_t get_comm() const { return comm_; }
[[nodiscard]] size_t total_size() const { return total_size_; }
[[nodiscard]] size_t data_granularity() const { return data_granularity_; }
virtual void create_memory() = 0;
virtual void destroy_memory() noexcept = 0;
[[nodiscard]] virtual void* get_continuous_mapping_pointer() const noexcept { return nullptr; }
[[nodiscard]] virtual wholememory_gref_t get_global_reference() const noexcept
{
wholememory_gref_t gref{};
gref.pointer = nullptr;
gref.stride = 0;
return gref;
}
virtual bool contains_pointer(const void* ptr) const = 0;
void get_local_memory(void** local_ptr, size_t* local_size, size_t* local_offset) const
{
if (local_ptr != nullptr) *local_ptr = local_partition_memory_pointer_;
if (local_size != nullptr) *local_size = rank_partition_strategy_.local_mem_size;
if (local_offset != nullptr) *local_offset = rank_partition_strategy_.local_mem_offset;
}
virtual bool get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank) const noexcept
{
*rank_memory_ptr = nullptr;
*rank_memory_size = 0;
*rank_memory_offset = 0;
return false;
}
[[nodiscard]] size_t get_partition_stride() const
{
return rank_partition_strategy_.partition_mem_stride;
}
protected:
  // In WholeMemory, memory is first allocated by one rank or by all ranks, and the whole memory
  // is then partitioned across ranks. Each rank can directly access its own partition and is
  // responsible for memory operations on it. In some WholeMemory types like CONTINUOUS or
  // CHUNKED, memory of other ranks may also be mapped into the current rank, so the current rank
  // may access the whole memory; however, in a parallel loading case it is not responsible for
  // processing memory outside the partition assigned to it by the partition strategy.
  //
  // Memory partitioning is decoupled from memory allocation; memory allocation may use different
  // strategies.
  //
  // The following 3 functions are for memory allocation strategies.
  // The first rank is responsible for all memory allocation; continuous or chunked host shared
  // memory may use this mode.
  void first_rank_allocate_all_strategy();
  // Each rank allocates exactly the same size; chunked device memory or nccl memory may use this
  // mode.
  void each_rank_same_chunk_strategy();
  // Each rank allocates a multiple of pages and the whole memory is mapped page by page;
  // continuous device memory uses this mode.
  void each_rank_multiple_page_strategy();
  // For now, the rank partitioning strategy is the same for all WholeMemory types.
  // Each rank is responsible for memory of size local_mem_size_ starting at local_mem_offset_.
  // For ranks with local_mem_size_ != 0, local_mem_offset_ equals rank_mem_stride_ * rank, which
  // means that for a valid memory offset, offset / rank_mem_stride_ gives the rank responsible
  // for it. A worked example follows the declaration below.
void generate_rank_partition_strategy();
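  // Worked example (illustrative numbers, assuming determine_entry_partition_plan() rounds the
  // per-rank slot count up): with total_size_ = 10 MB, data_granularity_ = 1 MB and 4 ranks,
  // data_slot_per_rank = ceil(10 / 4) = 3, so partition_mem_stride = 3 MB and the per-rank
  // (offset, size) pairs are r0:(0,3), r1:(3,3), r2:(6,3), r3:(9,1) in MB. An offset of 7 MB
  // maps back to rank 7 / 3 = 2.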
/*
* ++---------------------------------------------------------------------------------------++
* || Type || CONTINUOUS || CHUNKED || DISTRIBUTED ||
* ++--------------+------------------------------------------------------------------------++
* || Location || DEVICE | HOST || DEVICE | HOST || DEVICE | HOST ||
* ++---------------------------------------------------------------------------------------++
* || Allocated by || EACH | FIRST || EACH | FIRST || EACH | EACH ||
* ++---------------------------------------------------------------------------------------++
* || Allocate API || Driver | Host || Runtime | Host || Runtime | Runtime ||
* ++---------------------------------------------------------------------------------------++
* || IPC Mapping || Unix fd | mmap || cudaIpc | mmap || No IPC map | No IPC map ||
* ++---------------------------------------------------------------------------------------++
*/
wholememory_handle_t handle_;
wholememory_comm_t comm_;
wholememory_memory_type_t type_;
wholememory_memory_location_t location_;
// raw user input size, real allocation may be larger than this.
size_t total_size_;
size_t data_granularity_;
struct alloc_strategy {
size_t total_alloc_size = 0;
size_t local_alloc_size = 0;
size_t alignment = 0;
std::vector<size_t> alloc_offsets;
std::vector<size_t> alloc_sizes;
} alloc_strategy_;
struct partition_strategy {
// size of memory this rank is responsible for
size_t local_mem_size = 0;
// start location of the memory this rank is responsible for
size_t local_mem_offset = 0;
size_t partition_mem_stride = 0;
} rank_partition_strategy_;
void* local_partition_memory_pointer_ = nullptr;
void get_rank_partition_info(size_t* rank_mem_size,
size_t* rank_mem_start,
int rank) const noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(rank >= 0 && rank <= comm_->world_size);
size_t rank_mem_part_start =
std::min(rank_partition_strategy_.partition_mem_stride * rank, total_size_);
size_t rank_mem_part_end =
std::min(rank_partition_strategy_.partition_mem_stride * (rank + 1), total_size_);
if (rank_mem_size != nullptr) *rank_mem_size = rank_mem_part_end - rank_mem_part_start;
if (rank_mem_start != nullptr) *rank_mem_start = rank_mem_part_start;
}
static constexpr size_t HUGE_PAGE_THRESHOLD = 16UL * 1024UL * 1024UL * 1024UL;
static constexpr size_t HUGE_PAGE_SIZE = 512UL * 1024UL * 1024UL;
};
struct wholememory_vma_data {
wholememory_handle_t wholememory_handle;
const void* start_ptr;
size_t mem_block_size;
};
// mutex to protect wholememory_vma_map
static std::mutex wholememory_vma_mu;
// Map of the memory regions registered in WholeMemory.
// The key is the tail address of a registered block (one past its last byte), so the byte at the
// key itself is NOT inside WholeMemory. Using the tail as the key lets us find the block that may
// contain a pointer with a single upper_bound lookup. An illustrative lookup example follows
// wholememory_get_handle() below.
static std::map<uint64_t, wholememory_vma_data> wholememory_vma_map;
wholememory_handle_t wholememory_get_handle(const void* ptr)
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
uint64_t int_ptr = reinterpret_cast<uint64_t>(ptr);
auto it = wholememory_vma_map.upper_bound(int_ptr);
if (it == wholememory_vma_map.end()) return nullptr;
wholememory_handle_t wm_h = it->second.wholememory_handle;
if (wm_h->impl->contains_pointer(ptr)) { return wm_h; }
return nullptr;
}
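// Illustrative lookup example (hypothetical addresses): if a 0x1000-byte block starting at
// 0x7f0000 is registered, its map key is 0x7f1000. For ptr = 0x7f0800, upper_bound(0x7f0800)
// returns that entry (the first key strictly greater than ptr), and contains_pointer() then
// confirms the pointer really lies inside [start_ptr, start_ptr + mem_block_size).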
static void register_wholememory_vma_range_locked(const void* ptr,
size_t mem_block_size,
wholememory_handle_t wm_h)
{
WHOLEMEMORY_CHECK(ptr != nullptr);
WHOLEMEMORY_CHECK(wm_h != nullptr);
WHOLEMEMORY_CHECK(wm_h->impl->contains_pointer(ptr));
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(ptr);
uint64_t int_tail_ptr = int_start_ptr + mem_block_size;
WHOLEMEMORY_CHECK(wm_h->impl->contains_pointer(reinterpret_cast<void*>(int_tail_ptr - 1)));
WHOLEMEMORY_CHECK(wholememory_vma_map.find(int_tail_ptr) == wholememory_vma_map.end());
wholememory_vma_data vma_data;
vma_data.wholememory_handle = wm_h;
vma_data.start_ptr = ptr;
vma_data.mem_block_size = mem_block_size;
wholememory_vma_map.insert(std::pair<uint64_t, wholememory_vma_data>(int_tail_ptr, vma_data));
// No overlap with previous block
auto it1 = wholememory_vma_map.find(int_tail_ptr);
if (it1 != wholememory_vma_map.begin()) {
--it1;
WHOLEMEMORY_CHECK(reinterpret_cast<uint64_t>(it1->second.start_ptr) +
it1->second.mem_block_size <=
int_start_ptr);
}
// No overlap with next block
auto it2 = wholememory_vma_map.find(int_tail_ptr);
if (it2 != wholememory_vma_map.find(wholememory_vma_map.rbegin()->first)) {
++it2;
WHOLEMEMORY_CHECK(reinterpret_cast<uint64_t>(it2->second.start_ptr) >= int_tail_ptr);
}
}
static void unregister_wholememory_vma_range_locked(const void* ptr,
size_t mem_block_size,
wholememory_handle_t wm_h) noexcept
{
try {
WHOLEMEMORY_CHECK(wm_h != nullptr);
WHOLEMEMORY_CHECK(wm_h->impl->contains_pointer(ptr));
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(ptr);
uint64_t int_tail_ptr = int_start_ptr + mem_block_size;
WHOLEMEMORY_CHECK(wm_h->impl->contains_pointer(reinterpret_cast<void*>(int_tail_ptr - 1)));
auto it = wholememory_vma_map.find(int_tail_ptr);
WHOLEMEMORY_CHECK(it != wholememory_vma_map.end());
WHOLEMEMORY_CHECK(it->second.wholememory_handle == wm_h);
WHOLEMEMORY_CHECK(it->second.start_ptr == ptr);
WHOLEMEMORY_CHECK(it->second.mem_block_size == mem_block_size);
wholememory_vma_map.erase(int_tail_ptr);
} catch (const wholememory::logic_error& le) {
WHOLEMEMORY_FAIL_NOTHROW("%s", le.what());
}
}
// Implementation for distributed memory that doesn't need a global map.
// Each rank allocates its own memory; no communication is needed during creation.
// For DISTRIBUTED memory type with DEVICE or HOST location.
class distributed_wholememory_impl : public wholememory_impl {
public:
distributed_wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: wholememory_impl(
wholememory_handle, total_size, comm, memory_type, memory_location, data_granularity)
{
WHOLEMEMORY_CHECK(type_ == WHOLEMEMORY_MT_DISTRIBUTED);
}
void create_memory() override
{
each_rank_same_chunk_strategy();
generate_rank_partition_strategy();
create_local_cuda_runtime_memory();
register_private_memory();
}
void destroy_memory() noexcept override
{
unregister_private_memory();
destroy_local_cuda_runtime_memory();
}
bool contains_pointer(const void* ptr) const override
{
uint64_t int_ptr = reinterpret_cast<uint64_t>(ptr);
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(no_ipc_handle_.local_alloc_mem_ptr);
return int_ptr >= int_start_ptr && int_ptr < int_start_ptr + alloc_strategy_.local_alloc_size;
}
protected:
void register_private_memory()
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
if (alloc_strategy_.local_alloc_size > 0) {
register_wholememory_vma_range_locked(
no_ipc_handle_.local_alloc_mem_ptr, alloc_strategy_.local_alloc_size, handle_);
}
}
void unregister_private_memory() noexcept
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
if (alloc_strategy_.local_alloc_size > 0) {
unregister_wholememory_vma_range_locked(
no_ipc_handle_.local_alloc_mem_ptr, alloc_strategy_.local_alloc_size, handle_);
}
}
void create_local_cuda_runtime_memory()
{
bool on_device = location_ == WHOLEMEMORY_ML_DEVICE;
void* dev_ptr = nullptr;
size_t alloc_size = alloc_strategy_.local_alloc_size;
if (alloc_size == 0) {
no_ipc_handle_.local_alloc_mem_ptr = nullptr;
return;
}
if (on_device) {
WM_CUDA_CHECK(cudaMalloc(&dev_ptr, alloc_size));
} else {
WM_CUDA_CHECK(cudaMallocHost(&dev_ptr, alloc_size));
}
no_ipc_handle_.local_alloc_mem_ptr = dev_ptr;
local_partition_memory_pointer_ = dev_ptr;
}
void destroy_local_cuda_runtime_memory() noexcept
{
try {
void* ptr = no_ipc_handle_.local_alloc_mem_ptr;
if (no_ipc_handle_.local_alloc_mem_ptr == nullptr) return;
bool on_device = location_ == WHOLEMEMORY_ML_DEVICE;
if (on_device) {
WM_CUDA_CHECK(cudaFree(ptr));
} else {
WM_CUDA_CHECK(cudaFreeHost(ptr));
}
no_ipc_handle_.local_alloc_mem_ptr = nullptr;
} catch (const wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
}
}
struct no_ipc_handle {
void* local_alloc_mem_ptr = nullptr;
} no_ipc_handle_;
};
// Implementation for host wholememory that needs a global map.
// Rank 0 allocates all host memory and shares it with all ranks.
// For CONTINUOUS or CHUNKED type with HOST location.
class global_mapped_host_wholememory_impl : public wholememory_impl {
public:
global_mapped_host_wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: wholememory_impl(
wholememory_handle, total_size, comm, memory_type, memory_location, data_granularity)
{
WHOLEMEMORY_CHECK(type_ == WHOLEMEMORY_MT_CONTINUOUS || type_ == WHOLEMEMORY_MT_CHUNKED);
WHOLEMEMORY_CHECK(location_ == WHOLEMEMORY_ML_HOST);
}
void create_memory() override
{
first_rank_allocate_all_strategy();
generate_rank_partition_strategy();
create_and_map_shared_host_memory();
register_host_memory();
}
void destroy_memory() noexcept override
{
unregister_host_memory();
unmap_and_destroy_shared_host_memory();
}
[[nodiscard]] void* get_continuous_mapping_pointer() const noexcept override
{
return shared_host_handle_.shared_host_memory_ptr;
}
[[nodiscard]] wholememory_gref_t get_global_reference() const noexcept override
{
wholememory_gref_t gref{};
gref.pointer = get_continuous_mapping_pointer();
gref.stride = 0;
return gref;
}
bool contains_pointer(const void* ptr) const override
{
uint64_t int_ptr = reinterpret_cast<uint64_t>(ptr);
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(shared_host_handle_.shared_host_memory_ptr);
return int_ptr >= int_start_ptr && int_ptr < int_start_ptr + total_size_;
}
bool get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank) const noexcept override
{
size_t mem_size, mem_start;
get_rank_partition_info(&mem_size, &mem_start, rank);
if (rank_memory_ptr != nullptr)
*rank_memory_ptr = (char*)get_continuous_mapping_pointer() + mem_start;
if (rank_memory_size != nullptr) *rank_memory_size = mem_size;
if (rank_memory_offset != nullptr) *rank_memory_offset = mem_start;
return true;
}
protected:
void register_host_memory()
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
register_wholememory_vma_range_locked(
shared_host_handle_.shared_host_memory_ptr, total_size_, handle_);
}
void unregister_host_memory() noexcept
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
unregister_wholememory_vma_range_locked(
shared_host_handle_.shared_host_memory_ptr, total_size_, handle_);
}
static std::string get_host_memory_full_path(wholememory_comm_t wm_comm, int tensor_id)
{
std::string host_memory_full_path = get_shm_prefix(wm_comm);
host_memory_full_path.append("_").append("wm_host_").append(std::to_string(tensor_id));
return host_memory_full_path;
}
#define USE_SYSTEMV_SHM
#define SYSTEMV_SHM_PROJ_ID (0xE601EEEE)
void create_and_map_shared_host_memory()
{
WHOLEMEMORY_CHECK(is_intranode_communicator(comm_));
#ifdef USE_SYSTEMV_SHM
std::string shm_full_path = "/tmp/";
shm_full_path.append(get_host_memory_full_path(comm_, handle_->handle_id));
FILE* shm_fp = fopen(shm_full_path.c_str(), "w");
WHOLEMEMORY_CHECK(shm_fp != nullptr);
WHOLEMEMORY_CHECK(fclose(shm_fp) == 0);
auto shm_key = ftok(shm_full_path.c_str(), SYSTEMV_SHM_PROJ_ID);
WHOLEMEMORY_CHECK(shm_key != (key_t)-1);
int shm_id = -1;
#else
auto shm_full_path = get_host_memory_full_path(comm_, handle_->handle_id);
int shm_fd = -1;
#endif
if (comm_->world_rank == 0) {
#ifdef USE_SYSTEMV_SHM
shm_id = shmget(shm_key, alloc_strategy_.local_alloc_size, 0644 | IPC_CREAT | IPC_EXCL);
if (shm_id == -1) {
WHOLEMEMORY_FATAL(
"Create host shared memory from IPC key %d failed, Reason=%s", shm_key, strerror(errno));
}
#else
shm_fd = shm_open(shm_full_path.c_str(), O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
if (shm_fd < 0) {
WHOLEMEMORY_FATAL("Create host shared memory from file %s failed, Reason=%s.",
shm_full_path.c_str(),
strerror(errno));
}
WHOLEMEMORY_CHECK(ftruncate(shm_fd, alloc_strategy_.local_alloc_size) == 0);
#endif
communicator_barrier(comm_);
} else {
communicator_barrier(comm_);
#ifdef USE_SYSTEMV_SHM
shm_id = shmget(shm_key, alloc_strategy_.local_alloc_size, 0644);
if (shm_id == -1) {
WHOLEMEMORY_FATAL(
"Get host shared memory from IPC key %d failed, Reason=%s", shm_key, strerror(errno));
}
#else
shm_fd = shm_open(shm_full_path.c_str(), O_RDWR, S_IRUSR | S_IWUSR);
if (shm_fd < 0) {
WHOLEMEMORY_FATAL("Rank=%d open host shared memory from file %s failed.",
comm_->world_rank,
shm_full_path.c_str());
}
#endif
}
communicator_barrier(comm_);
void* mmap_ptr = nullptr;
#ifdef USE_SYSTEMV_SHM
mmap_ptr = shmat(shm_id, nullptr, 0);
WHOLEMEMORY_CHECK(mmap_ptr != (void*)-1);
#else
mmap_ptr = mmap(
nullptr, alloc_strategy_.total_alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
WHOLEMEMORY_CHECK(mmap_ptr != (void*)-1);
#endif
memset(static_cast<char*>(mmap_ptr) + rank_partition_strategy_.local_mem_offset,
0,
rank_partition_strategy_.local_mem_size);
WM_CUDA_CHECK_NO_THROW(
cudaHostRegister(mmap_ptr, alloc_strategy_.total_alloc_size, cudaHostRegisterDefault));
#ifndef USE_SYSTEMV_SHM
WHOLEMEMORY_CHECK(close(shm_fd) == 0);
#endif
void* dev_ptr = nullptr;
WM_CUDA_CHECK_NO_THROW(cudaHostGetDevicePointer(&dev_ptr, mmap_ptr, 0));
WHOLEMEMORY_CHECK(dev_ptr == mmap_ptr);
shared_host_handle_.shared_host_memory_ptr = dev_ptr;
local_partition_memory_pointer_ =
static_cast<char*>(dev_ptr) + rank_partition_strategy_.local_mem_offset;
}
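  // Sequence of the function above: rank 0 creates the shared segment (IPC_CREAT | IPC_EXCL for
  // System V, shm_open(O_CREAT) otherwise), a barrier guarantees it exists before the other ranks
  // open it, every rank maps the same segment, zero-fills only its own partition, and finally
  // cudaHostRegister() pins the mapping so it is directly accessible from the GPU.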
void unmap_and_destroy_shared_host_memory() noexcept
{
try {
void* ptr = shared_host_handle_.shared_host_memory_ptr;
if (ptr == nullptr) return;
WM_CUDA_CHECK(cudaHostUnregister(ptr));
#ifdef USE_SYSTEMV_SHM
std::string shm_full_path = "/tmp/";
shm_full_path.append(get_host_memory_full_path(comm_, handle_->handle_id));
auto shm_key = ftok(shm_full_path.c_str(), SYSTEMV_SHM_PROJ_ID);
WHOLEMEMORY_CHECK(shm_key != (key_t)-1);
int shm_id = shmget(shm_key, alloc_strategy_.local_alloc_size, 0644);
if (shm_id == -1) {
WHOLEMEMORY_FATAL("Get host shared memory from IPC key %d for delete failed, Reason=%s",
shm_key,
strerror(errno));
}
WHOLEMEMORY_CHECK(shmdt(ptr) == 0);
#else
auto shm_full_path = get_host_memory_full_path(comm_, handle_->handle_id);
WHOLEMEMORY_CHECK(munmap(ptr, alloc_strategy_.total_alloc_size) == 0);
#endif
communicator_barrier(comm_);
#ifdef USE_SYSTEMV_SHM
if (comm_->world_rank == 0) {
WHOLEMEMORY_CHECK(shmctl(shm_id, IPC_RMID, nullptr) == 0);
WHOLEMEMORY_CHECK(unlink(shm_full_path.c_str()) == 0);
}
#else
if (comm_->world_rank == 0) { WHOLEMEMORY_CHECK(shm_unlink(shm_full_path.c_str()) == 0); }
#endif
communicator_barrier(comm_);
shared_host_handle_.shared_host_memory_ptr = nullptr;
} catch (const wholememory::logic_error& wle) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wle.what());
} catch (const wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
}
}
struct shared_host_handle {
void* shared_host_memory_ptr = nullptr;
} shared_host_handle_;
};
// Implementation for continuous device wholememory that needs a global map.
// Each rank allocates multiple pages and shares the pages with other ranks.
// For CONTINUOUS type with DEVICE location.
class continuous_device_wholememory_impl : public wholememory_impl {
public:
continuous_device_wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: wholememory_impl(
wholememory_handle, total_size, comm, memory_type, memory_location, data_granularity)
{
WHOLEMEMORY_CHECK(type_ == WHOLEMEMORY_MT_CONTINUOUS);
}
void create_memory() override
{
WHOLEMEMORY_CHECK(location_ == WHOLEMEMORY_ML_DEVICE);
each_rank_multiple_page_strategy();
generate_rank_partition_strategy();
create_and_map_driver_device_memory();
register_continuous_device_memory();
}
void destroy_memory() noexcept override
{
unregister_continuous_device_memory();
unmap_and_destroy_driver_device_memory();
}
[[nodiscard]] void* get_continuous_mapping_pointer() const noexcept override
{
return cu_alloc_handle_.mapped_whole_memory;
}
[[nodiscard]] wholememory_gref_t get_global_reference() const noexcept override
{
wholememory_gref_t gref{};
gref.pointer = get_continuous_mapping_pointer();
gref.stride = 0;
return gref;
}
bool contains_pointer(const void* ptr) const override
{
uint64_t int_ptr = reinterpret_cast<uint64_t>(ptr);
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(cu_alloc_handle_.mapped_whole_memory);
return int_ptr >= int_start_ptr && int_ptr < int_start_ptr + total_size_;
}
bool get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank) const noexcept override
{
size_t mem_size, mem_start;
get_rank_partition_info(&mem_size, &mem_start, rank);
if (rank_memory_ptr != nullptr)
*rank_memory_ptr = (char*)get_continuous_mapping_pointer() + mem_start;
if (rank_memory_size != nullptr) *rank_memory_size = mem_size;
if (rank_memory_offset != nullptr) *rank_memory_offset = mem_start;
return true;
}
protected:
void register_continuous_device_memory()
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
register_wholememory_vma_range_locked(
cu_alloc_handle_.mapped_whole_memory, total_size_, handle_);
}
void unregister_continuous_device_memory() noexcept
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
unregister_wholememory_vma_range_locked(
cu_alloc_handle_.mapped_whole_memory, total_size_, handle_);
}
struct ipc_sharable_cu_handle {
int fd = -1;
};
static CUmemGenericAllocationHandle create_cu_mem(size_t mem_size, int dev_id)
{
CUmemGenericAllocationHandle h;
CUmemAllocationProp prop;
memset(&prop, 0, sizeof(prop));
prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
prop.requestedHandleTypes = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;
prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
prop.allocFlags.compressionType = CU_MEM_ALLOCATION_COMP_NONE;
prop.location.id = dev_id;
WM_CU_CHECK(cuMemCreate(&h, mem_size, &prop, 0));
return h;
}
static ipc_sharable_cu_handle create_sharable_handle(CUmemGenericAllocationHandle h)
{
ipc_sharable_cu_handle sharable_cu_handle;
sharable_cu_handle.fd = -1;
if (h != 0) {
WM_CU_CHECK(cuMemExportToShareableHandle(
&sharable_cu_handle.fd, h, CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0));
}
return sharable_cu_handle;
}
static int ipc_open_socket(const std::string& name)
{
int sock = -1;
struct sockaddr_un skt_addr {
0
};
if ((sock = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) {
WHOLEMEMORY_FATAL("IPC failure: Socket creation error.");
}
bzero(&skt_addr, sizeof(skt_addr));
skt_addr.sun_family = AF_UNIX;
if (name.length() >= sizeof(skt_addr.sun_path)) {
WHOLEMEMORY_FATAL(
"IPC socket path length (%zu) larger than sockaddr_un.sun_path length (%lu), full_path: %s",
name.length(),
sizeof(skt_addr.sun_path),
name.c_str());
}
strcpy(skt_addr.sun_path, name.c_str());
if (bind(sock, reinterpret_cast<struct sockaddr*>(&skt_addr), sizeof(skt_addr)) < 0) {
WHOLEMEMORY_FATAL("IPC failure: Binding socket failed, name=%s", name.c_str());
}
return sock;
}
static void ipc_close_socket(int fd, const std::string& name)
{
WHOLEMEMORY_CHECK(fd >= 0);
WHOLEMEMORY_CHECK(!name.empty());
WHOLEMEMORY_CHECK(unlink(name.c_str()) == 0);
WHOLEMEMORY_CHECK(close(fd) == 0);
};
[[nodiscard]] std::string get_sender_fd_name() const
{
std::string name = get_temporary_directory_path(comm_);
name.append("/sender_").append(std::to_string(comm_->world_rank)).append(".sock");
return name;
}
[[nodiscard]] std::string get_recver_fd_name(int src_id) const
{
std::string name = get_temporary_directory_path(comm_);
name.append("/recver_")
.append(std::to_string(src_id))
.append("_to_")
.append(std::to_string(comm_->world_rank))
.append(".sock");
return name;
}
[[nodiscard]] std::string get_target_recver_fd_name(int dst_id) const
{
std::string name = get_temporary_directory_path(comm_);
name.append("/recver_")
.append(std::to_string(comm_->world_rank))
.append("_to_")
.append(std::to_string(dst_id))
.append(".sock");
return name;
}
void open_unix_domain_sockets()
{
communicator_barrier(comm_);
cu_alloc_handle_.recv_fds.clear();
cu_alloc_handle_.recv_fds.resize(comm_->world_size, -1);
for (int i = 0; i < comm_->world_size; i++) {
cu_alloc_handle_.recv_fds[i] = ipc_open_socket(get_recver_fd_name(i));
}
cu_alloc_handle_.send_fd = ipc_open_socket(get_sender_fd_name());
communicator_barrier(comm_);
}
void close_unix_domain_sockets()
{
communicator_barrier(comm_);
ipc_close_socket(cu_alloc_handle_.send_fd, get_sender_fd_name());
cu_alloc_handle_.send_fd = -1;
WHOLEMEMORY_CHECK(cu_alloc_handle_.recv_fds.size() == comm_->world_size);
for (int i = 0; i < comm_->world_size; i++) {
ipc_close_socket(cu_alloc_handle_.recv_fds[i], get_recver_fd_name(i));
}
communicator_barrier(comm_);
}
static void ipc_send_sharable_handle(int sock_fd,
const ipc_sharable_cu_handle& sent_handle,
const std::string& dst_name)
{
struct msghdr message_header {};
struct iovec iov[1];
union {
struct cmsghdr cm;
char control[CMSG_SPACE(sizeof(int))];
} control_un{};
struct cmsghdr* cmptr;
struct sockaddr_un cliaddr {};
// Construct client address to send this Shareable handle to
bzero(&cliaddr, sizeof(cliaddr));
cliaddr.sun_family = AF_UNIX;
strcpy(cliaddr.sun_path, dst_name.c_str());
// Send corresponding shareable handle to the client
int sendfd = sent_handle.fd;
message_header.msg_control = control_un.control;
message_header.msg_controllen = sizeof(control_un.control);
cmptr = CMSG_FIRSTHDR(&message_header);
cmptr->cmsg_len = CMSG_LEN(sizeof(int));
cmptr->cmsg_level = SOL_SOCKET;
cmptr->cmsg_type = SCM_RIGHTS;
memmove(CMSG_DATA(cmptr), &sendfd, sizeof(sendfd));
message_header.msg_name = static_cast<void*>(&cliaddr);
message_header.msg_namelen = sizeof(struct sockaddr_un);
iov[0].iov_base = const_cast<void*>(static_cast<const void*>(""));
iov[0].iov_len = 1;
message_header.msg_iov = iov;
message_header.msg_iovlen = 1;
ssize_t send_result = sendmsg(sock_fd, &message_header, 0);
if (send_result <= 0) {
WHOLEMEMORY_FATAL("IPC failure: Sending data over socket failed send_result=%ld.",
send_result);
}
}
static ipc_sharable_cu_handle ipc_recv_sharable_handle(int recv_sock_fd)
{
struct msghdr message_header = {nullptr};
struct iovec iov[1];
// Union to guarantee alignment requirements for control array
union {
struct cmsghdr cm;
char control[CMSG_SPACE(sizeof(int))];
} control_un{};
struct cmsghdr* cmptr;
ssize_t n;
int received_fd = -1;
char dummy_buffer[1];
message_header.msg_control = control_un.control;
message_header.msg_controllen = sizeof(control_un.control);
iov[0].iov_base = static_cast<void*>(&dummy_buffer[0]);
iov[0].iov_len = sizeof(dummy_buffer);
message_header.msg_iov = iov;
message_header.msg_iovlen = 1;
if ((n = recvmsg(recv_sock_fd, &message_header, 0)) <= 0) {
WHOLEMEMORY_FATAL("IPC failure: Receiving data over socket failed, recvmsg returned %ld", n);
}
if (((cmptr = CMSG_FIRSTHDR(&message_header)) != nullptr) &&
(cmptr->cmsg_len == CMSG_LEN(sizeof(int)))) {
if ((cmptr->cmsg_level != SOL_SOCKET) || (cmptr->cmsg_type != SCM_RIGHTS)) {
WHOLEMEMORY_FATAL("Non socket received");
}
memmove(&received_fd, CMSG_DATA(cmptr), sizeof(received_fd));
} else {
WHOLEMEMORY_FATAL("Recv cm_ptr=%p, cmsg_len=%ld", cmptr, (cmptr ? cmptr->cmsg_len : -1));
}
ipc_sharable_cu_handle sharable_cu_handle;
sharable_cu_handle.fd = received_fd;
return sharable_cu_handle;
}
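  // Note on the two helpers above: no memory contents travel over the socket. Only the POSIX file
  // descriptor produced by cuMemExportToShareableHandle() is passed as SCM_RIGHTS ancillary data;
  // the kernel duplicates it into the receiving process, which later rebuilds a
  // CUmemGenericAllocationHandle with cuMemImportFromShareableHandle().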
void exchange_driver_device_memory_handles(
std::vector<ipc_sharable_cu_handle>* recv_ipc_sharable_cu_handles,
std::vector<ipc_sharable_cu_handle>* send_ipc_sharable_cu_handles)
{
for (int r = 0; r < comm_->world_size; r++) {
if ((*send_ipc_sharable_cu_handles)[r].fd >= 0) {
ipc_send_sharable_handle(cu_alloc_handle_.send_fd,
(*send_ipc_sharable_cu_handles)[r],
get_target_recver_fd_name(r));
}
}
communicator_barrier(comm_);
recv_ipc_sharable_cu_handles->resize(comm_->world_size);
for (int r = 0; r < comm_->world_size; r++) {
if (alloc_strategy_.alloc_sizes[r] > 0) {
(*recv_ipc_sharable_cu_handles)[r] = ipc_recv_sharable_handle(cu_alloc_handle_.recv_fds[r]);
}
}
communicator_barrier(comm_);
if (cu_alloc_handle_.local_ipc_handle.fd >= 0) {
WHOLEMEMORY_CHECK(close(cu_alloc_handle_.local_ipc_handle.fd) == 0);
}
send_ipc_sharable_cu_handles->clear();
}
static CUmemGenericAllocationHandle import_cu_mem_handle(
ipc_sharable_cu_handle sharable_cu_handle)
{
CUmemGenericAllocationHandle h;
WM_CU_CHECK(cuMemImportFromShareableHandle(
&h, (void*)(uintptr_t)sharable_cu_handle.fd, CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR));
return h;
}
void map_driver_device_memory_handles(
std::vector<ipc_sharable_cu_handle>* recv_ipc_sharable_cu_handles)
{
cu_alloc_handle_.all_cu_handles.resize(comm_->world_size);
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size = alloc_strategy_.alloc_sizes[i];
if (mem_size > 0) {
WHOLEMEMORY_CHECK((*recv_ipc_sharable_cu_handles)[i].fd >= 0);
cu_alloc_handle_.all_cu_handles[i] =
import_cu_mem_handle((*recv_ipc_sharable_cu_handles)[i]);
WM_CU_CHECK(cuMemMap(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory) +
alloc_strategy_.alloc_offsets[i],
mem_size,
0,
cu_alloc_handle_.all_cu_handles[i],
0));
WHOLEMEMORY_CHECK(close((*recv_ipc_sharable_cu_handles)[i].fd) == 0);
} else {
WHOLEMEMORY_CHECK((*recv_ipc_sharable_cu_handles)[i].fd == -1);
}
    }
    // Clear the received handles only after every rank's fd has been consumed; clearing inside
    // the loop would invalidate the entries still needed by later iterations.
    recv_ipc_sharable_cu_handles->clear();
CUmemAccessDesc madesc;
madesc.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
madesc.location.id = comm_->dev_id;
madesc.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
WM_CU_CHECK(cuMemSetAccess(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size,
&madesc,
1));
}
void create_and_map_driver_device_memory()
{
WM_CU_CHECK(
cuMemAddressReserve(reinterpret_cast<CUdeviceptr*>(&cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size,
alloc_strategy_.alignment,
0,
0));
cu_alloc_handle_.all_cu_handles.resize(comm_->world_size, 0);
std::vector<ipc_sharable_cu_handle> send_ipc_sharable_cu_handles(comm_->world_size);
std::vector<ipc_sharable_cu_handle> recv_ipc_sharable_cu_handles;
cu_alloc_handle_.local_cu_handle = 0;
if (alloc_strategy_.local_alloc_size > 0) {
cu_alloc_handle_.local_cu_handle =
create_cu_mem(alloc_strategy_.local_alloc_size, comm_->dev_id);
}
cu_alloc_handle_.local_ipc_handle = create_sharable_handle(cu_alloc_handle_.local_cu_handle);
for (int i = 0; i < comm_->world_size; i++) {
send_ipc_sharable_cu_handles[i] = cu_alloc_handle_.local_ipc_handle;
}
open_unix_domain_sockets();
exchange_driver_device_memory_handles(&recv_ipc_sharable_cu_handles,
&send_ipc_sharable_cu_handles);
close_unix_domain_sockets();
map_driver_device_memory_handles(&recv_ipc_sharable_cu_handles);
communicator_barrier(comm_);
local_partition_memory_pointer_ = static_cast<char*>(cu_alloc_handle_.mapped_whole_memory) +
rank_partition_strategy_.local_mem_offset;
}
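  // Summary of the mapping above: each rank reserves one contiguous VA range covering the whole
  // allocation (cuMemAddressReserve), creates only its own physical chunk (cuMemCreate), exports
  // it as a POSIX fd, exchanges fds over the Unix domain sockets, imports every peer's handle and
  // cuMemMap()s it at that peer's offset inside the reserved range, and finally enables access
  // with cuMemSetAccess(). The result is one continuous pointer backed by memory from all ranks.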
void unmap_and_destroy_driver_device_memory() noexcept
{
try {
communicator_barrier(comm_);
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size = alloc_strategy_.alloc_sizes[i];
if (mem_size > 0) {
WM_CU_CHECK(
cuMemUnmap(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory) +
alloc_strategy_.alloc_offsets[i],
mem_size));
WM_CU_CHECK(cuMemRelease(cu_alloc_handle_.all_cu_handles[i]));
}
}
communicator_barrier(comm_);
if (alloc_strategy_.local_alloc_size > 0) {
WM_CU_CHECK(cuMemRelease(cu_alloc_handle_.local_cu_handle));
}
WM_CU_CHECK(
cuMemAddressFree(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size));
communicator_barrier(comm_);
} catch (const wholememory::cu_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
}
}
struct cu_alloc_handle {
CUmemGenericAllocationHandle local_cu_handle = 0;
std::vector<CUmemGenericAllocationHandle> all_cu_handles;
void* mapped_whole_memory = nullptr;
ipc_sharable_cu_handle local_ipc_handle;
#if CUDA_VERSION >= 12030
CUmemFabricHandle local_ipc_fabric_handle;
#endif
int send_fd = -1;
std::vector<int> recv_fds;
} cu_alloc_handle_;
};
// Implementation for chunked device wholememory that needs a global map.
// Each rank allocates a same-sized memory chunk and shares it with other ranks.
// For CHUNKED type with DEVICE location.
class chunked_device_wholememory_impl : public wholememory_impl {
public:
chunked_device_wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: wholememory_impl(
wholememory_handle, total_size, comm, memory_type, memory_location, data_granularity)
{
WHOLEMEMORY_CHECK(type_ == WHOLEMEMORY_MT_CHUNKED);
WHOLEMEMORY_CHECK(location_ == WHOLEMEMORY_ML_DEVICE);
}
void create_memory() override
{
each_rank_same_chunk_strategy();
generate_rank_partition_strategy();
create_and_map_runtime_device_memory();
register_chunked_device_memory();
}
void destroy_memory() noexcept override
{
unregister_chunked_device_memory();
unmap_and_destroy_runtime_device_memory();
}
[[nodiscard]] wholememory_gref_t get_global_reference() const noexcept override { return gref_; }
bool contains_pointer(const void* ptr) const override
{
uint64_t int_ptr = reinterpret_cast<uint64_t>(ptr);
size_t acc_size = 0;
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size_of_this_rank_and_after = total_size_ - acc_size;
size_t mem_size_for_current_rank =
std::min(mem_size_of_this_rank_and_after, rank_partition_strategy_.partition_mem_stride);
uint64_t int_start_ptr = reinterpret_cast<uint64_t>(cuda_ipc_handle_.mapped_ptrs[i]);
if (int_ptr >= int_start_ptr && int_ptr < int_start_ptr + mem_size_for_current_rank) {
return true;
}
acc_size += mem_size_for_current_rank;
}
return false;
}
bool get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank) const noexcept override
{
size_t mem_size, mem_start;
get_rank_partition_info(&mem_size, &mem_start, rank);
if (rank_memory_ptr != nullptr) *rank_memory_ptr = cuda_ipc_handle_.mapped_ptrs[rank];
if (rank_memory_size != nullptr) *rank_memory_size = mem_size;
if (rank_memory_offset != nullptr) *rank_memory_offset = mem_start;
return true;
}
protected:
void register_chunked_device_memory()
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
size_t acc_size = 0;
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size_of_this_rank_and_after = total_size_ - acc_size;
size_t mem_size_for_current_rank =
std::min(mem_size_of_this_rank_and_after, rank_partition_strategy_.partition_mem_stride);
if (mem_size_for_current_rank > 0) {
register_wholememory_vma_range_locked(
cuda_ipc_handle_.mapped_ptrs[i], mem_size_for_current_rank, handle_);
}
acc_size += mem_size_for_current_rank;
}
}
void unregister_chunked_device_memory() noexcept
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
size_t acc_size = 0;
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size_of_this_rank_and_after = total_size_ - acc_size;
size_t mem_size_for_current_rank =
std::min(mem_size_of_this_rank_and_after, rank_partition_strategy_.partition_mem_stride);
if (mem_size_for_current_rank > 0) {
unregister_wholememory_vma_range_locked(
cuda_ipc_handle_.mapped_ptrs[i], mem_size_for_current_rank, handle_);
}
acc_size += mem_size_for_current_rank;
}
}
void create_and_map_runtime_device_memory()
{
cuda_ipc_handle_.mapped_ptrs.resize(comm_->world_size, nullptr);
cuda_ipc_handle_.all_mem_handles.resize(comm_->world_size);
WM_CUDA_CHECK(
cudaMalloc((void**)&cuda_ipc_handle_.local_mem_ptr, alloc_strategy_.local_alloc_size));
WM_CUDA_CHECK(
cudaIpcGetMemHandle(&cuda_ipc_handle_.local_ipc_handle, cuda_ipc_handle_.local_mem_ptr));
comm_->host_allgather(&cuda_ipc_handle_.local_ipc_handle,
cuda_ipc_handle_.all_mem_handles.data(),
sizeof(cuda_ipc_handle_.local_ipc_handle),
WHOLEMEMORY_DT_INT8);
for (int i = 0; i < comm_->world_size; i++) {
if (i == comm_->world_rank) {
cuda_ipc_handle_.mapped_ptrs[i] = cuda_ipc_handle_.local_mem_ptr;
} else {
WM_CUDA_CHECK(cudaIpcOpenMemHandle(&cuda_ipc_handle_.mapped_ptrs[i],
cuda_ipc_handle_.all_mem_handles[i],
cudaIpcMemLazyEnablePeerAccess));
}
}
local_partition_memory_pointer_ = cuda_ipc_handle_.local_mem_ptr;
WM_CUDA_CHECK(cudaMalloc(&gref_.pointer, sizeof(void*) * comm_->world_size));
WM_CUDA_CHECK(cudaMemcpy(gref_.pointer,
cuda_ipc_handle_.mapped_ptrs.data(),
sizeof(void*) * comm_->world_size,
cudaMemcpyHostToDevice));
gref_.stride = rank_partition_strategy_.partition_mem_stride;
}
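  // Illustrative note (assumption about how the gref is consumed elsewhere): gref_.pointer is a
  // device array holding the world_size mapped base pointers and gref_.stride is the per-rank
  // partition size, so device-side accessors can locate a global byte offset roughly as
  //
  //   int   owner = offset / gref.stride;                                  // owning rank
  //   char* p = static_cast<char**>(gref.pointer)[owner] + offset % gref.stride;
  //
  // (hypothetical snippet; the real accessors live in the device-side gref utilities).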
void unmap_and_destroy_runtime_device_memory() noexcept
{
try {
WM_CUDA_CHECK(cudaFree(gref_.pointer));
gref_.pointer = nullptr;
for (int i = 0; i < comm_->world_size; i++) {
if (i != comm_->world_rank) {
WM_CUDA_CHECK(cudaIpcCloseMemHandle(cuda_ipc_handle_.mapped_ptrs[i]));
}
}
      // Make sure every rank has closed its IPC mappings before the owning memory is freed.
communicator_barrier(comm_);
WM_CUDA_CHECK(cudaFree(cuda_ipc_handle_.local_mem_ptr));
} catch (const wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
}
}
struct cuda_ipc_handle {
cudaIpcMemHandle_t local_ipc_handle;
std::vector<cudaIpcMemHandle_t> all_mem_handles;
std::vector<void*> mapped_ptrs;
void* local_mem_ptr;
} cuda_ipc_handle_;
wholememory_gref_t gref_;
};
// Implementation for MNNVL wholememory that uses the CUDA driver API.
// Each rank allocates multiple pages and shares the pages with other ranks.
// For CONTINUOUS type with HOST or DEVICE location.
#if CUDA_VERSION >= 12030
class continuous_mnnvl_wholememory_impl : public continuous_device_wholememory_impl {
public:
continuous_mnnvl_wholememory_impl(wholememory_handle_t wholememory_handle,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
: continuous_device_wholememory_impl(
wholememory_handle, total_size, comm, memory_type, memory_location, data_granularity)
{
WHOLEMEMORY_INFO("Using continuous_mnnvl_wholememory_impl");
WHOLEMEMORY_CHECK_NOTHROW(type_ == WHOLEMEMORY_MT_CONTINUOUS);
}
void check_valid()
{
if (location_ == WHOLEMEMORY_ML_HOST) { WHOLEMEMORY_CHECK_NOTHROW(SupportEGM()); }
WHOLEMEMORY_CHECK_NOTHROW(SupportMNNVL());
}
void create_memory() override
{
check_valid();
each_rank_multiple_page_strategy();
generate_rank_partition_strategy();
create_and_map_driver_memory();
register_continuous_mnnvl_memory();
}
void destroy_memory() noexcept override
{
unregister_continuous_mnnvl_memory();
unmap_and_destroy_driver_host_memory();
}
protected:
void register_continuous_mnnvl_memory()
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
register_wholememory_vma_range_locked(
cu_alloc_handle_.mapped_whole_memory, total_size_, handle_);
}
void unregister_continuous_mnnvl_memory() noexcept
{
std::unique_lock<std::mutex> vma_lock(wholememory_vma_mu);
unregister_wholememory_vma_range_locked(
cu_alloc_handle_.mapped_whole_memory, total_size_, handle_);
}
static CUmemGenericAllocationHandle create_cu_mem(size_t mem_size,
int dev_id,
wholememory_memory_location_t location)
{
CUmemGenericAllocationHandle h;
CUmemAllocationProp prop;
memset(&prop, 0, sizeof(prop));
if (location == WHOLEMEMORY_ML_HOST) {
int numa_id;
cuDeviceGetAttribute(&numa_id, CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID, dev_id);
prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
prop.requestedHandleTypes = CU_MEM_HANDLE_TYPE_FABRIC;
prop.location.type = CU_MEM_LOCATION_TYPE_HOST_NUMA;
prop.location.id = numa_id;
prop.allocFlags.compressionType = CU_MEM_ALLOCATION_COMP_NONE;
} else {
WHOLEMEMORY_CHECK_NOTHROW(location == WHOLEMEMORY_ML_DEVICE);
prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
prop.requestedHandleTypes = CU_MEM_HANDLE_TYPE_FABRIC;
prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
prop.location.id = dev_id;
prop.allocFlags.compressionType = CU_MEM_ALLOCATION_COMP_NONE;
}
WM_CU_CHECK_NO_THROW(cuMemCreate(&h, mem_size, &prop, 0));
return h;
}
static CUmemFabricHandle create_sharable_fabric_handle(CUmemGenericAllocationHandle h)
{
CUmemFabricHandle fabric_handle;
if (h != 0) {
WM_CU_CHECK_NO_THROW(
cuMemExportToShareableHandle(&fabric_handle, h, CU_MEM_HANDLE_TYPE_FABRIC, 0));
}
return fabric_handle;
}
void exchange_driver_host_memory_handles(
std::vector<CUmemFabricHandle>* recv_ipc_sharable_cu_fabric_handles,
CUmemFabricHandle* send_ipc_sharable_cu_fabric_handle)
{
communicator_barrier(comm_);
recv_ipc_sharable_cu_fabric_handles->resize(comm_->world_size);
comm_->host_allgather(static_cast<const void*>(send_ipc_sharable_cu_fabric_handle),
static_cast<void*>(recv_ipc_sharable_cu_fabric_handles->data()),
sizeof(CUmemFabricHandle),
WHOLEMEMORY_DT_INT8);
communicator_barrier(comm_);
}
CUmemGenericAllocationHandle import_cu_mem_handle(CUmemFabricHandle sharable_cu_fabric_handle,
bool same_rank)
{
CUmemGenericAllocationHandle h;
if (!same_rank) {
WM_CU_CHECK_NO_THROW(
cuMemImportFromShareableHandle(&h, &sharable_cu_fabric_handle, CU_MEM_HANDLE_TYPE_FABRIC));
} else {
h = cu_alloc_handle_.local_cu_handle;
}
return h;
}
void map_driver_memory_handles(
std::vector<CUmemFabricHandle>* recv_ipc_sharable_cu_fabric_handles)
{
cu_alloc_handle_.all_cu_handles.resize(comm_->world_size);
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size = alloc_strategy_.alloc_sizes[i];
if (mem_size > 0) {
cu_alloc_handle_.all_cu_handles[i] =
import_cu_mem_handle((*recv_ipc_sharable_cu_fabric_handles)[i], i == comm_->world_rank);
WM_CU_CHECK_NO_THROW(
cuMemMap(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory) +
alloc_strategy_.alloc_offsets[i],
mem_size,
0,
cu_alloc_handle_.all_cu_handles[i],
0));
}
    }
    recv_ipc_sharable_cu_fabric_handles->clear();
CUmemAccessDesc madesc;
madesc.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
madesc.location.id = comm_->dev_id;
madesc.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
WM_CU_CHECK_NO_THROW(
cuMemSetAccess(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size,
&madesc,
1));
}
void create_and_map_driver_memory()
{
WM_CU_CHECK(
cuMemAddressReserve(reinterpret_cast<CUdeviceptr*>(&cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size,
alloc_strategy_.alignment,
0,
0));
cu_alloc_handle_.all_cu_handles.resize(comm_->world_size, 0);
std::vector<CUmemFabricHandle> recv_ipc_sharable_cu_fabric_handles;
cu_alloc_handle_.local_cu_handle = 0;
if (alloc_strategy_.local_alloc_size > 0) {
cu_alloc_handle_.local_cu_handle =
create_cu_mem(alloc_strategy_.local_alloc_size, comm_->dev_id, location_);
}
cu_alloc_handle_.local_ipc_fabric_handle =
create_sharable_fabric_handle(cu_alloc_handle_.local_cu_handle);
exchange_driver_host_memory_handles(&recv_ipc_sharable_cu_fabric_handles,
&cu_alloc_handle_.local_ipc_fabric_handle);
map_driver_memory_handles(&recv_ipc_sharable_cu_fabric_handles);
local_partition_memory_pointer_ = static_cast<char*>(cu_alloc_handle_.mapped_whole_memory) +
rank_partition_strategy_.local_mem_offset;
}
void unmap_and_destroy_driver_host_memory() noexcept
{
try {
communicator_barrier(comm_);
for (int i = 0; i < comm_->world_size; i++) {
size_t mem_size = alloc_strategy_.alloc_sizes[i];
if (mem_size > 0) {
WM_CU_CHECK(
cuMemUnmap(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory) +
alloc_strategy_.alloc_offsets[i],
mem_size));
if (i != comm_->world_rank) {
WM_CU_CHECK(cuMemRelease(cu_alloc_handle_.all_cu_handles[i]));
}
}
}
communicator_barrier(comm_);
if (alloc_strategy_.local_alloc_size > 0) {
WM_CU_CHECK(cuMemRelease(cu_alloc_handle_.local_cu_handle));
}
WM_CU_CHECK(
cuMemAddressFree(reinterpret_cast<CUdeviceptr>(cu_alloc_handle_.mapped_whole_memory),
alloc_strategy_.total_alloc_size));
communicator_barrier(comm_);
} catch (const wholememory::cu_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
}
}
};
#endif
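// Each rank owns ceil(data_slot_count / world_size) data slots, except possibly the last
// rank(s). For example, 10 slots of granularity G over 4 ranks gives data_slot_per_rank = 3,
// so the ranks own 3, 3, 3 and 1 slots and the partition stride is 3 * G bytes.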
void wholememory_impl::generate_rank_partition_strategy()
{
size_t data_slot_count = total_size_ / data_granularity_;
size_t data_slot_per_rank = determine_entry_partition_plan(data_slot_count, comm_->world_size);
size_t rank_data_slot_start = std::min(comm_->world_rank * data_slot_per_rank, data_slot_count);
size_t rank_data_slot_end =
std::min((comm_->world_rank + 1) * data_slot_per_rank, data_slot_count);
size_t rank_data_slot_count = rank_data_slot_end - rank_data_slot_start;
rank_partition_strategy_.local_mem_size = rank_data_slot_count * data_granularity_;
rank_partition_strategy_.local_mem_offset = rank_data_slot_start * data_granularity_;
rank_partition_strategy_.partition_mem_stride = data_slot_per_rank * data_granularity_;
}
void wholememory_impl::first_rank_allocate_all_strategy()
{
alloc_strategy_.total_alloc_size = total_size_;
// only first rank allocate memory
alloc_strategy_.local_alloc_size =
(comm_->world_rank == 0) ? alloc_strategy_.total_alloc_size : 0;
alloc_strategy_.alignment = comm_->alloc_granularity;
alloc_strategy_.alloc_offsets.clear();
alloc_strategy_.alloc_offsets.resize(comm_->world_size, alloc_strategy_.total_alloc_size);
alloc_strategy_.alloc_offsets[0] = 0;
alloc_strategy_.alloc_sizes.clear();
alloc_strategy_.alloc_sizes.resize(comm_->world_size, 0);
alloc_strategy_.alloc_sizes[0] = alloc_strategy_.total_alloc_size;
}
void wholememory_impl::each_rank_same_chunk_strategy()
{
size_t data_slot_count = total_size_ / data_granularity_;
size_t data_slot_per_rank = determine_entry_partition_plan(data_slot_count, comm_->world_size);
// each rank allocate same size
alloc_strategy_.local_alloc_size = data_slot_per_rank * data_granularity_;
alloc_strategy_.alignment = comm_->alloc_granularity;
if (total_size_ > HUGE_PAGE_THRESHOLD) {
alloc_strategy_.local_alloc_size =
round_up_unsafe(alloc_strategy_.local_alloc_size, HUGE_PAGE_SIZE);
alloc_strategy_.alignment = HUGE_PAGE_SIZE;
}
alloc_strategy_.total_alloc_size = alloc_strategy_.local_alloc_size * comm_->world_size;
alloc_strategy_.alloc_offsets.clear();
alloc_strategy_.alloc_offsets.resize(comm_->world_size, 0);
for (int i = 0; i < comm_->world_size; i++) {
alloc_strategy_.alloc_offsets[i] = alloc_strategy_.local_alloc_size * i;
}
alloc_strategy_.alloc_sizes.clear();
alloc_strategy_.alloc_sizes.resize(comm_->world_size, alloc_strategy_.local_alloc_size);
}
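// Splits the page-aligned total allocation into near-equal page ranges, one per rank, using
// integer arithmetic: rank i allocates pages [i * N / world_size, (i + 1) * N / world_size).
// For example, N = 10 pages over 4 ranks yields per-rank page counts of 2, 3, 2 and 3.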
void wholememory_impl::each_rank_multiple_page_strategy()
{
size_t page_size = comm_->alloc_granularity;
if (total_size_ >= HUGE_PAGE_THRESHOLD) page_size = HUGE_PAGE_SIZE;
alloc_strategy_.alignment = page_size;
alloc_strategy_.total_alloc_size = round_up_unsafe(total_size_, page_size);
size_t total_alloc_page_count = alloc_strategy_.total_alloc_size / page_size;
size_t rank_page_start = comm_->world_rank * total_alloc_page_count / comm_->world_size;
size_t rank_page_end = (comm_->world_rank + 1) * total_alloc_page_count / comm_->world_size;
size_t page_count = rank_page_end - rank_page_start;
alloc_strategy_.local_alloc_size = page_count * page_size;
alloc_strategy_.alloc_offsets.resize(comm_->world_size, 0);
alloc_strategy_.alloc_sizes.resize(comm_->world_size, 0);
for (int i = 0; i < comm_->world_size; i++) {
size_t rank_i_page_start = i * total_alloc_page_count / comm_->world_size;
size_t rank_i_page_end = (i + 1) * total_alloc_page_count / comm_->world_size;
alloc_strategy_.alloc_offsets[i] = rank_i_page_start * page_size;
alloc_strategy_.alloc_sizes[i] = (rank_i_page_end - rank_i_page_start) * page_size;
}
}
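// All ranks agree on a handle id by iterating: each rank proposes its smallest locally unused
// id at or above the current candidate, the proposals are allgathered, and every rank adopts
// the maximum. The loop repeats until all proposals match, which guarantees the chosen id is
// unused on every rank.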
int negotiate_handle_id_with_comm_locked(wholememory_comm_t wm_comm)
{
WM_COMM_CHECK_ALL_SAME(wm_comm, WM_MEM_OP_EXCHANGE_ID);
int id = 0;
bool all_same = false;
std::vector<int> rank_ids(wm_comm->world_size);
auto& id_handle_map = wm_comm->wholememory_map;
while (!all_same) {
while (id_handle_map.find(id) != id_handle_map.end())
id++;
wm_comm->host_allgather(&id, rank_ids.data(), 1, WHOLEMEMORY_DT_INT);
int max_id = -1;
all_same = true;
for (int i = 0; i < wm_comm->world_size; i++) {
if (rank_ids[i] > max_id) max_id = rank_ids[i];
if (rank_ids[i] != id) all_same = false;
}
id = max_id;
}
return id;
}
struct wholememory_create_param {
wholememory_create_param() = default;
wholememory_create_param(const struct wholememory_create_param&) = default;
wholememory_create_param(struct wholememory_create_param&&) = default;
wholememory_create_param& operator=(const wholememory_create_param&) = default;
wholememory_create_param& operator=(wholememory_create_param&&) = default;
wholememory_create_param(size_t ts,
wholememory_memory_type_t mt,
wholememory_memory_location_t ml,
size_t mg)
{
total_size = ts;
memory_type = mt;
memory_location = ml;
min_granularity = mg;
}
bool operator==(const wholememory_create_param& rhs) const
{
return total_size == rhs.total_size && memory_type == rhs.memory_type &&
memory_location == rhs.memory_location && min_granularity == rhs.min_granularity;
}
bool operator!=(const wholememory_create_param& rhs) const { return !(*this == rhs); }
size_t total_size;
wholememory_memory_type_t memory_type;
wholememory_memory_location_t memory_location;
size_t min_granularity;
};
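// Implementation dispatch used by create_wholememory below:
//   WHOLEMEMORY_MT_DISTRIBUTED                     -> distributed_wholememory_impl
//   WHOLEMEMORY_MT_CONTINUOUS, intranode or no EGM:
//     host location                                -> global_mapped_host_wholememory_impl
//     device location                              -> continuous_device_wholememory_impl
//   WHOLEMEMORY_MT_CONTINUOUS, multinode with EGM  -> continuous_mnnvl_wholememory_impl (CUDA >= 12.3)
//   WHOLEMEMORY_MT_CHUNKED (intranode only)        -> global_mapped_host_ or chunked_device_wholememory_impl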
wholememory_error_code_t create_wholememory(wholememory_handle_t* wholememory_handle_ptr,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity) noexcept
{
try {
if (total_size % data_granularity != 0) return WHOLEMEMORY_INVALID_VALUE;
*wholememory_handle_ptr = nullptr;
std::unique_lock<std::mutex> mlock(comm->mu);
auto* whole_memory_handle = new wholememory_handle_();
whole_memory_handle->handle_id = negotiate_handle_id_with_comm_locked(comm);
WM_COMM_CHECK_ALL_SAME(comm, WM_MEM_OP_CREATE);
wholememory_create_param wcp(total_size, memory_type, memory_location, data_granularity);
WM_COMM_CHECK_ALL_SAME(comm, wcp);
if (memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
whole_memory_handle->impl = new distributed_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
} else if (memory_type == WHOLEMEMORY_MT_CONTINUOUS) {
if (is_intranode_communicator(comm) || !SupportEGM()) {
if (memory_location == WHOLEMEMORY_ML_HOST) {
whole_memory_handle->impl = new global_mapped_host_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
} else {
whole_memory_handle->impl = new continuous_device_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
}
} else {
#if CUDA_VERSION >= 12030
whole_memory_handle->impl = new continuous_mnnvl_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
#else
WHOLEMEMORY_FAIL_NOTHROW("Multinode CONTINUOUS is only supported on CUDA Version >= 12.3");
#endif
}
} else if (memory_type == WHOLEMEMORY_MT_CHUNKED) {
WHOLEMEMORY_CHECK_NOTHROW(is_intranode_communicator(comm));
if (memory_location == WHOLEMEMORY_ML_HOST) {
whole_memory_handle->impl = new global_mapped_host_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
} else {
whole_memory_handle->impl = new chunked_device_wholememory_impl(
whole_memory_handle, total_size, comm, memory_type, memory_location, data_granularity);
}
} else {
WHOLEMEMORY_FATAL("Unsupported memory_type (%d) and memory_location (%d).",
(int)memory_type,
(int)memory_location);
}
whole_memory_handle->impl->create_memory();
comm->wholememory_map.insert(
std::pair<int, wholememory_handle_t>(whole_memory_handle->handle_id, whole_memory_handle));
*wholememory_handle_ptr = whole_memory_handle;
return WHOLEMEMORY_SUCCESS;
} catch (const wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::logic_error& rle) {
WHOLEMEMORY_FAIL_NOTHROW("%s", rle.what());
} catch (const wholememory::logic_error& wle) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wle.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW("Unknown exception.");
}
}
wholememory_error_code_t destroy_wholememory_with_comm_locked(
wholememory_handle_t wholememory_handle) noexcept
{
try {
if (wholememory_handle == nullptr) return WHOLEMEMORY_INVALID_INPUT;
if (wholememory_handle->impl == nullptr) return WHOLEMEMORY_INVALID_INPUT;
auto* comm = wholememory_handle->impl->get_comm();
if (comm == nullptr) return WHOLEMEMORY_INVALID_INPUT;
if (comm->wholememory_map.find(wholememory_handle->handle_id) == comm->wholememory_map.end()) {
return WHOLEMEMORY_INVALID_VALUE;
}
WM_COMM_CHECK_ALL_SAME(comm, WM_MEM_OP_DESTROY);
WM_COMM_CHECK_ALL_SAME(comm, wholememory_handle->handle_id);
comm->wholememory_map.erase(wholememory_handle->handle_id);
delete wholememory_handle;
return WHOLEMEMORY_SUCCESS;
} catch (const wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wce.what());
} catch (const raft::logic_error& rle) {
WHOLEMEMORY_FAIL_NOTHROW("%s", rle.what());
} catch (const wholememory::logic_error& wle) {
WHOLEMEMORY_FAIL_NOTHROW("%s", wle.what());
} catch (const raft::exception& re) {
WHOLEMEMORY_FAIL_NOTHROW("%s", re.what());
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW("Unknown exception.");
}
}
wholememory_error_code_t destroy_wholememory(wholememory_handle_t wholememory_handle) noexcept
{
wholememory_comm_t comm = wholememory_handle->impl->get_comm();
std::unique_lock<std::mutex> mlock(comm->mu);
return destroy_wholememory_with_comm_locked(wholememory_handle);
}
wholememory_error_code_t get_communicator_from_handle(
wholememory_comm_t* comm, wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
*comm = wholememory_handle->impl->get_comm();
return WHOLEMEMORY_SUCCESS;
}
wholememory_memory_type_t get_memory_type(wholememory_handle_t wholememory_handle) noexcept
{
return wholememory_handle->impl->get_type();
}
wholememory_memory_location_t get_memory_location(wholememory_handle_t wholememory_handle) noexcept
{
return wholememory_handle->impl->get_location();
}
size_t get_total_size(wholememory_handle_t wholememory_handle) noexcept
{
return wholememory_handle->impl->total_size();
}
size_t get_data_granularity(wholememory_handle_t wholememory_handle) noexcept
{
return wholememory_handle->impl->data_granularity();
}
wholememory_error_code_t get_local_memory_from_handle(
void** local_ptr,
size_t* local_size,
size_t* local_offset,
wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_handle->impl->get_local_memory(local_ptr, local_size, local_offset);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t get_rank_memory_from_handle(
void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank,
wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
auto* comm = wholememory_handle->impl->get_comm();
if (rank < 0 || rank >= comm->world_size) { return WHOLEMEMORY_INVALID_INPUT; }
if (wholememory_handle->impl->get_rank_memory(
rank_memory_ptr, rank_memory_size, rank_memory_offset, rank) == false) {
return WHOLEMEMORY_INVALID_INPUT;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t get_global_pointer_from_handle(
void** global_ptr, wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
*global_ptr = wholememory_handle->impl->get_continuous_mapping_pointer();
return (*global_ptr) == nullptr ? WHOLEMEMORY_INVALID_INPUT : WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t get_global_reference_from_handle(
wholememory_gref_t* wholememory_gref, wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
*wholememory_gref = wholememory_handle->impl->get_global_reference();
return (wholememory_gref->pointer == nullptr) ? WHOLEMEMORY_INVALID_INPUT : WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t determine_partition_plan(size_t* size_per_rank,
size_t total_size,
size_t data_granularity,
int world_size) noexcept
{
if (total_size % data_granularity != 0) { return WHOLEMEMORY_INVALID_VALUE; }
if (size_per_rank == nullptr) { return WHOLEMEMORY_INVALID_INPUT; }
  size_t entry_per_rank = determine_entry_partition_plan(total_size / data_granularity, world_size);
  *size_per_rank        = entry_per_rank * data_granularity;
return WHOLEMEMORY_SUCCESS;
}
size_t determine_entry_partition_plan(size_t total_entry_count, int world_size) noexcept
{
return div_rounding_up_safe<size_t>(total_entry_count, world_size);
}
wholememory_error_code_t get_partition_plan_from_handle(
size_t* size_per_rank, wholememory_handle_t wholememory_handle) noexcept
{
if (wholememory_handle == nullptr || wholememory_handle->impl == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
*size_per_rank = wholememory_handle->impl->get_partition_stride();
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory
wholememory_handle_::~wholememory_handle_()
{
if (impl != nullptr) {
impl->destroy_memory();
delete impl;
impl = nullptr;
}
}
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/integer_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/integer_utils.hpp>
namespace wholememory {
//! Utility functions
/**
 * Finds the smallest integer not less than `number_to_round` whose remainder modulo `modulus`
 * is zero. This function assumes that `number_to_round` is non-negative and `modulus` is
 * positive.
*/
template <typename S>
inline S round_up_unsafe(S number_to_round, S modulus) noexcept
{
auto remainder = number_to_round % modulus;
if (remainder == 0) { return number_to_round; }
auto rounded_up = number_to_round - remainder + modulus;
return rounded_up;
}
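// For example, round_up_unsafe(10, 4) == 12 and round_up_unsafe(12, 4) == 12. "Unsafe" refers
// to possible overflow when number_to_round is close to the maximum value of S.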
/**
 * Divides the left-hand-side by the right-hand-side, rounding the quotient up to the nearest
 * integer, e.g. (9,5) -> 2, (10,5) -> 2, (11,5) -> 3.
 *
 * @param dividend the number to divide
 * @param divisor the number by which to divide
 * @return the smallest integer not less than the exact quotient dividend/divisor,
 * i.e. ceil(dividend / divisor)
*
* @note sensitive to overflow, i.e. if dividend > std::numeric_limits<S>::max() - divisor,
* the result will be incorrect
*/
template <typename S, typename T>
constexpr inline S div_rounding_up_unsafe(const S& dividend, const T& divisor) noexcept
{
return raft::div_rounding_up_unsafe(dividend, divisor);
}
/**
 * Divides the left-hand-side by the right-hand-side, rounding the quotient up to the nearest
 * integer, e.g. (9,5) -> 2, (10,5) -> 2, (11,5) -> 3.
 *
 * @param dividend the number to divide
 * @param divisor the number by which to divide
 * @return the smallest integer not less than the exact quotient dividend/divisor,
 * i.e. ceil(dividend / divisor)
*
* @note will not overflow, and may _or may not_ be slower than the intuitive
* approach of using (dividend + divisor - 1) / divisor
*/
template <typename I>
constexpr inline I div_rounding_up_safe(I dividend, I divisor) noexcept
{
return raft::div_rounding_up_safe<I>(dividend, divisor);
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding_optimizer.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/embedding.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include "embedding_cache.hpp"
#ifdef __cplusplus
extern "C" {
#endif
struct wholememory_embedding_optimizer_ {
wholememory_optimizer_type_t optimizer_type;
};
#ifdef __cplusplus
}
#endif
namespace wholememory {
class embedding_optimizer_impl_base;
using optimizer_parameter_setter_fn_t = std::function<wholememory_error_code_t(const void*)>;
class optimizer_state_t {
public:
optimizer_state_t() = default;
~optimizer_state_t() = default;
  // Per-element optimizer states are cachable, like momentums.
  // They are packed into the same cachable state embedding (see cachable_state_embedding
  // below), each state occupying the column range [start_dim, start_dim + dim).
struct cachable_state {
// name of this state
std::string name;
int start_dim;
int dim;
wholememory_tensor_t global_raw_state_tensor = nullptr;
};
wholememory_embedding_t cachable_state_embedding = nullptr;
// wholememory_tensor_t global_cachable_raw_padded_tensor = nullptr;
wholememory_tensor_t global_cachable_raw_user_tensor = nullptr;
wholememory_tensor_t local_cachable_wm_tensor = nullptr;
// wholememory_tensor_t global_cacheline_tag_wm_tensor = nullptr;
// wholememory_tensor_t global_cacheline_data_wm_tensor = nullptr;
// wholememory_tensor_t local_cacheline_tag_wm_tensor = nullptr;
// wholememory_tensor_t local_cacheline_data_wm_tensor = nullptr;
  // Per-embedding optimizer states are uncachable, like beta1^t and beta2^t for momentum-based
  // optimizers.
struct uncachable_state {
std::string name;
int dim;
wholememory_dtype_t dtype;
wholememory_tensor_t global_raw_padded_tensor = nullptr;
wholememory_tensor_t global_raw_sub_tensor = nullptr;
wholememory_tensor_t local_tensor = nullptr;
};
int64_t local_start_index = -1;
device_cache_for_host* device_cache_for_host_ = nullptr;
std::vector<cachable_state> cachable_states;
std::vector<uncachable_state> uncachable_states;
};
class embedding_optimizer_impl_base : public wholememory_embedding_optimizer_ {
public:
embedding_optimizer_impl_base();
virtual ~embedding_optimizer_impl_base() = default;
virtual wholememory_error_code_t set_parameter(const char* parameter_name, void* value) noexcept;
/**
* Apply gradients.
   * As trainable embeddings use a READWRITE cache, the cache communicator is the same as the
   * embedding communicator. Gradients are partitioned, and each rank is only responsible for
   * its own partition.
*
* @param indices : bucketed indices that belongs to current rank.
* @param grads : bucketed gradients that belongs to current rank.
* @param local_embedding : local embedding of current rank.
* @param optimizer_state : pointer to optimizer state.
* @param lr : learning rate
* @param stream : cudaStream_t to use
* @return : wholememory_error_code_t
*/
virtual wholememory_error_code_t step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept = 0;
virtual void create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept
{
}
virtual wholememory_error_code_t init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept
{
return WHOLEMEMORY_SUCCESS;
}
[[nodiscard]] const char* const* get_optimizer_state_names() const noexcept
{
return state_names_.data();
}
virtual wholememory_tensor_t get_optimizer_state(optimizer_state_t* optimizer_state,
const char* state_name);
protected:
static optimizer_parameter_setter_fn_t get_float_setter(float* target_ptr);
static void zero_local_state_tensor(wholememory_tensor_t local_state_tensor);
static void set_float_local_state_tensor(wholememory_tensor_t local_state_tensor, float value);
std::map<std::string, optimizer_parameter_setter_fn_t> setter_fns_;
const char* name_ = nullptr;
std::vector<const char*> state_names_ = {nullptr};
};
wholememory_error_code_t create_embedding_optimizer(
wholememory_embedding_optimizer_t* optimizer,
wholememory_optimizer_type_t optimizer_type) noexcept;
wholememory_error_code_t optimizer_set_parameter(wholememory_embedding_optimizer_t optimizer,
const char* parameter_name,
void* value) noexcept;
void destroy_embedding_optimizer(wholememory_embedding_optimizer_t optimizer) noexcept;
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding_optimizer.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "embedding_optimizer.hpp"
#include <cstring>
#include "cuda_macros.hpp"
#include "logger.hpp"
#include "wholememory/embedding.hpp"
#include "wholememory_ops/functions/embedding_optimizer_func.h"
namespace wholememory {
embedding_optimizer_impl_base::embedding_optimizer_impl_base() = default;
wholememory_error_code_t float_setter_fn(float* target_ptr, const void* data)
{
const auto* float_data = static_cast<const float*>(data);
*target_ptr = *float_data;
return WHOLEMEMORY_SUCCESS;
}
optimizer_parameter_setter_fn_t embedding_optimizer_impl_base::get_float_setter(float* target)
{
return std::bind(float_setter_fn, target, std::placeholders::_1);
}
void embedding_optimizer_impl_base::zero_local_state_tensor(wholememory_tensor_t local_state_tensor)
{
void* local_ptr = wholememory_tensor_get_data_pointer(local_state_tensor);
auto* local_state_desc = wholememory_tensor_get_tensor_description(local_state_tensor);
WHOLEMEMORY_CHECK_NOTHROW(local_state_desc->storage_offset == 0);
size_t total_elt_count = wholememory_get_memory_element_count_from_tensor(local_state_desc);
size_t elt_size = wholememory_dtype_get_element_size(local_state_desc->dtype);
size_t total_size = total_elt_count * elt_size;
WM_CUDA_CHECK_NO_THROW(cudaMemset(local_ptr, 0, total_size));
WM_CUDA_CHECK_NO_THROW(cudaDeviceSynchronize());
}
void embedding_optimizer_impl_base::set_float_local_state_tensor(
wholememory_tensor_t local_state_tensor, float value)
{
void* local_ptr = wholememory_tensor_get_data_pointer(local_state_tensor);
auto* local_state_desc = wholememory_tensor_get_tensor_description(local_state_tensor);
WHOLEMEMORY_CHECK_NOTHROW(local_state_desc->storage_offset == 0);
size_t total_elt_count = wholememory_get_memory_element_count_from_tensor(local_state_desc);
wholememory_ops::set_memory_to_float_value(
static_cast<float*>(local_ptr), value, total_elt_count, nullptr);
WM_CUDA_CHECK_NO_THROW(cudaDeviceSynchronize());
}
wholememory_error_code_t embedding_optimizer_impl_base::set_parameter(const char* parameter_name,
void* value) noexcept
{
std::string const parameter_name_str = parameter_name;
auto it = setter_fns_.find(parameter_name_str);
if (it == setter_fns_.end()) {
WHOLEMEMORY_ERROR("parameter name %s is not valid for optimizer %s", parameter_name, name_);
return WHOLEMEMORY_INVALID_INPUT;
}
return it->second(value);
}
wholememory_tensor_t embedding_optimizer_impl_base::get_optimizer_state(
optimizer_state_t* optimizer_state, const char* state_name)
{
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state != nullptr);
WHOLEMEMORY_CHECK_NOTHROW(state_names_.size() == optimizer_state->cachable_states.size() +
optimizer_state->uncachable_states.size() + 1);
WHOLEMEMORY_FAIL_NOTHROW("optimizer state name %s not found for %s", state_name, name_);
for (size_t i = 0; i < optimizer_state->cachable_states.size(); i++) {
if (strcmp(state_name, optimizer_state->cachable_states[i].name.c_str()) == 0) {
WHOLEMEMORY_CHECK_NOTHROW(strcmp(state_name, state_names_[i]) == 0);
return optimizer_state->cachable_states[i].global_raw_state_tensor;
}
}
size_t cachable_state_count = optimizer_state->cachable_states.size();
for (size_t i = 0; i < optimizer_state->uncachable_states.size(); i++) {
if (strcmp(state_name, optimizer_state->uncachable_states[i].name.c_str()) == 0) {
WHOLEMEMORY_CHECK_NOTHROW(strcmp(state_name, state_names_[i + cachable_state_count]) == 0);
return optimizer_state->uncachable_states[i].global_raw_sub_tensor;
}
}
  WHOLEMEMORY_FAIL_NOTHROW("optimizer state name %s not found for %s", state_name, name_);
  return nullptr;
}
class SGDEmbeddingOptimizer : public embedding_optimizer_impl_base {
public:
SGDEmbeddingOptimizer();
wholememory_error_code_t step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept override;
protected:
float weight_decay = 0.0F;
};
SGDEmbeddingOptimizer::SGDEmbeddingOptimizer()
{
name_ = "SGD";
setter_fns_.emplace(std::pair<std::string, optimizer_parameter_setter_fn_t>(
"weight_decay", get_float_setter(&weight_decay)));
state_names_ = {nullptr};
}
wholememory_error_code_t SGDEmbeddingOptimizer::step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(grads != nullptr && indices != nullptr && local_embedding != nullptr &&
optimizer_state != nullptr);
int cache_set_coverage = 0;
wholememory_tensor_t local_embedding_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_embedding_cacheline_data_wm_tensor = nullptr;
if (optimizer_state->device_cache_for_host_ != nullptr) {
cache_set_coverage = optimizer_state->device_cache_for_host_->get_cache_set_coverage();
auto* local_embedding_cache = optimizer_state->device_cache_for_host_->get_cache_local_data();
local_embedding_cacheline_tag_wm_tensor = local_embedding_cache->cache_line_tag_;
local_embedding_cacheline_data_wm_tensor = local_embedding_cache->cache_line_data_;
}
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::sgd_optimizer_step(indices,
grads,
local_embedding,
local_embedding_cacheline_tag_wm_tensor,
local_embedding_cacheline_data_wm_tensor,
optimizer_state->local_start_index,
cache_set_coverage,
weight_decay,
lr,
stream));
return WHOLEMEMORY_SUCCESS;
}
class LazyAdamEmbeddingOptimizer : public embedding_optimizer_impl_base {
public:
LazyAdamEmbeddingOptimizer();
void create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept override;
wholememory_error_code_t init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept override;
wholememory_error_code_t step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept override;
protected:
float weight_decay = 0.0F;
float epsilon = 1E-8;
float beta1 = 0.9F;
float beta2 = 0.999F;
float adam_w = 0.0F;
};
LazyAdamEmbeddingOptimizer::LazyAdamEmbeddingOptimizer()
{
name_ = "LazyAdam";
setter_fns_.emplace(std::pair<std::string, optimizer_parameter_setter_fn_t>(
"weight_decay", get_float_setter(&weight_decay)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("epsilon", get_float_setter(&epsilon)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("beta1", get_float_setter(&beta1)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("beta2", get_float_setter(&beta2)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("adam_w", get_float_setter(&adam_w)));
state_names_ = {"m", "v", "beta12t", nullptr};
}
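// LazyAdam keeps two per-element cachable states ("m" and "v", each of embedding_dim columns,
// packed into the cachable state embedding) plus one per-row uncachable state "beta12t" of
// dim 2, initialized to 1.0, which the name suggests holds the per-row running powers
// beta1^t and beta2^t.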
void LazyAdamEmbeddingOptimizer::create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept
{
optimizer_state->cachable_states.resize(2);
auto& m_state = optimizer_state->cachable_states[0];
auto& v_state = optimizer_state->cachable_states[1];
m_state.name = "m";
m_state.dim = embedding_dim;
v_state.name = "v";
v_state.dim = embedding_dim;
optimizer_state->uncachable_states.resize(1);
auto& beta12t_state = optimizer_state->uncachable_states[0];
beta12t_state.name = "beta12t";
beta12t_state.dim = 2;
beta12t_state.dtype = WHOLEMEMORY_DT_FLOAT;
}
wholememory_error_code_t LazyAdamEmbeddingOptimizer::init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state->cachable_states.size() == 2);
auto& mv_state = optimizer_state->local_cachable_wm_tensor;
zero_local_state_tensor(mv_state);
auto& per_embedding_local_state = optimizer_state->uncachable_states[0].local_tensor;
set_float_local_state_tensor(per_embedding_local_state, 1.0F);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t LazyAdamEmbeddingOptimizer::step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(grads != nullptr && indices != nullptr && local_embedding != nullptr &&
optimizer_state != nullptr);
int cache_set_coverage = 0;
wholememory_tensor_t local_embedding_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_embedding_cacheline_data_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_data_wm_tensor = nullptr;
if (optimizer_state->device_cache_for_host_ != nullptr) {
cache_set_coverage = optimizer_state->device_cache_for_host_->get_cache_set_coverage();
auto* local_embedding_cache = optimizer_state->device_cache_for_host_->get_cache_local_data();
local_embedding_cacheline_tag_wm_tensor = local_embedding_cache->cache_line_tag_;
local_embedding_cacheline_data_wm_tensor = local_embedding_cache->cache_line_data_;
device_cache_for_host* state_embedding_device_cache = nullptr;
try {
auto* cachable_embedding_base =
static_cast<wholememory::embedding_base*>(optimizer_state->cachable_state_embedding);
state_embedding_device_cache =
dynamic_cast<device_cache_for_host*>(cachable_embedding_base->get_cache_ptr());
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW(
"cast from cachable_embedding_base->get_cache_ptr() to device_cache_for_host* failed.");
}
WHOLEMEMORY_CHECK_NOTHROW(state_embedding_device_cache != nullptr);
auto* local_state_cache = state_embedding_device_cache->get_cache_local_data();
local_state_cacheline_tag_wm_tensor = local_state_cache->cache_line_tag_;
local_state_cacheline_data_wm_tensor = local_state_cache->cache_line_data_;
}
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::lazy_adam_optimizer_step(indices,
grads,
local_embedding,
local_embedding_cacheline_tag_wm_tensor,
local_embedding_cacheline_data_wm_tensor,
optimizer_state->local_cachable_wm_tensor,
local_state_cacheline_tag_wm_tensor,
local_state_cacheline_data_wm_tensor,
optimizer_state->uncachable_states[0].local_tensor,
optimizer_state->local_start_index,
cache_set_coverage,
weight_decay,
epsilon,
beta1,
beta2,
adam_w > 0.5F,
lr,
stream));
return WHOLEMEMORY_SUCCESS;
}
class AdaGradEmbeddingOptimizer : public embedding_optimizer_impl_base {
public:
AdaGradEmbeddingOptimizer();
void create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept override;
wholememory_error_code_t init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept override;
wholememory_error_code_t step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept override;
protected:
float weight_decay = 0.0f;
float epsilon = 1e-8;
};
AdaGradEmbeddingOptimizer::AdaGradEmbeddingOptimizer()
{
name_ = "AdaGrad";
setter_fns_.emplace(std::pair<std::string, optimizer_parameter_setter_fn_t>(
"weight_decay", get_float_setter(&weight_decay)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("epsilon", get_float_setter(&epsilon)));
state_names_ = {"state_sum", nullptr};
}
void AdaGradEmbeddingOptimizer::create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept
{
optimizer_state->cachable_states.resize(1);
auto& state_sum_state = optimizer_state->cachable_states[0];
state_sum_state.name = "state_sum";
state_sum_state.dim = embedding_dim;
}
wholememory_error_code_t AdaGradEmbeddingOptimizer::init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state->cachable_states.size() == 1);
auto& state_sum_state = optimizer_state->local_cachable_wm_tensor;
zero_local_state_tensor(state_sum_state);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t AdaGradEmbeddingOptimizer::step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(grads != nullptr && indices != nullptr && local_embedding != nullptr &&
optimizer_state != nullptr);
int cache_set_coverage = 0;
wholememory_tensor_t local_embedding_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_embedding_cacheline_data_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_data_wm_tensor = nullptr;
if (optimizer_state->device_cache_for_host_ != nullptr) {
cache_set_coverage = optimizer_state->device_cache_for_host_->get_cache_set_coverage();
auto* local_embedding_cache = optimizer_state->device_cache_for_host_->get_cache_local_data();
local_embedding_cacheline_tag_wm_tensor = local_embedding_cache->cache_line_tag_;
local_embedding_cacheline_data_wm_tensor = local_embedding_cache->cache_line_data_;
device_cache_for_host* state_embedding_device_cache = nullptr;
try {
auto* cachable_embedding_base =
static_cast<wholememory::embedding_base*>(optimizer_state->cachable_state_embedding);
state_embedding_device_cache =
dynamic_cast<device_cache_for_host*>(cachable_embedding_base->get_cache_ptr());
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW(
"cast from cachable_embedding_base->get_cache_ptr() to device_cache_for_host* failed.");
}
WHOLEMEMORY_CHECK_NOTHROW(state_embedding_device_cache != nullptr);
auto* local_state_cache = state_embedding_device_cache->get_cache_local_data();
local_state_cacheline_tag_wm_tensor = local_state_cache->cache_line_tag_;
local_state_cacheline_data_wm_tensor = local_state_cache->cache_line_data_;
}
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::ada_grad_optimizer_step(indices,
grads,
local_embedding,
local_embedding_cacheline_tag_wm_tensor,
local_embedding_cacheline_data_wm_tensor,
optimizer_state->local_cachable_wm_tensor,
local_state_cacheline_tag_wm_tensor,
local_state_cacheline_data_wm_tensor,
optimizer_state->local_start_index,
cache_set_coverage,
weight_decay,
epsilon,
lr,
stream));
return WHOLEMEMORY_SUCCESS;
}
class RMSPropEmbeddingOptimizer : public embedding_optimizer_impl_base {
public:
RMSPropEmbeddingOptimizer();
void create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept override;
wholememory_error_code_t init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept override;
wholememory_error_code_t step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept override;
protected:
float weight_decay = 0.0f;
float epsilon = 1e-8;
float alpha = 0.99;
};
RMSPropEmbeddingOptimizer::RMSPropEmbeddingOptimizer()
{
name_ = "RMSProp";
setter_fns_.emplace(std::pair<std::string, optimizer_parameter_setter_fn_t>(
"weight_decay", get_float_setter(&weight_decay)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("epsilon", get_float_setter(&epsilon)));
setter_fns_.emplace(
std::pair<std::string, optimizer_parameter_setter_fn_t>("alpha", get_float_setter(&alpha)));
state_names_ = {"v", nullptr};
}
void RMSPropEmbeddingOptimizer::create_optimizer_states(optimizer_state_t* optimizer_state,
int embedding_dim) noexcept
{
optimizer_state->cachable_states.resize(1);
auto& v_state = optimizer_state->cachable_states[0];
v_state.name = "v";
v_state.dim = embedding_dim;
}
wholememory_error_code_t RMSPropEmbeddingOptimizer::init_optimizer_states(
optimizer_state_t* optimizer_state) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state->cachable_states.size() == 1);
auto& v_state = optimizer_state->local_cachable_wm_tensor;
zero_local_state_tensor(v_state);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t RMSPropEmbeddingOptimizer::step(wholememory_tensor_t indices,
wholememory_tensor_t grads,
wholememory_tensor_t local_embedding,
optimizer_state_t* optimizer_state,
float lr,
cudaStream_t stream) noexcept
{
WHOLEMEMORY_CHECK_NOTHROW(grads != nullptr && indices != nullptr && local_embedding != nullptr &&
optimizer_state != nullptr);
int cache_set_coverage = 0;
wholememory_tensor_t local_embedding_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_embedding_cacheline_data_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_tag_wm_tensor = nullptr;
wholememory_tensor_t local_state_cacheline_data_wm_tensor = nullptr;
if (optimizer_state->device_cache_for_host_ != nullptr) {
cache_set_coverage = optimizer_state->device_cache_for_host_->get_cache_set_coverage();
auto* local_embedding_cache = optimizer_state->device_cache_for_host_->get_cache_local_data();
local_embedding_cacheline_tag_wm_tensor = local_embedding_cache->cache_line_tag_;
local_embedding_cacheline_data_wm_tensor = local_embedding_cache->cache_line_data_;
device_cache_for_host* state_embedding_device_cache = nullptr;
try {
auto* cachable_embedding_base =
static_cast<wholememory::embedding_base*>(optimizer_state->cachable_state_embedding);
state_embedding_device_cache =
dynamic_cast<device_cache_for_host*>(cachable_embedding_base->get_cache_ptr());
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW(
"cast from cachable_embedding_base->get_cache_ptr() to device_cache_for_host* failed.");
}
WHOLEMEMORY_CHECK_NOTHROW(state_embedding_device_cache != nullptr);
auto* local_state_cache = state_embedding_device_cache->get_cache_local_data();
local_state_cacheline_tag_wm_tensor = local_state_cache->cache_line_tag_;
local_state_cacheline_data_wm_tensor = local_state_cache->cache_line_data_;
}
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::rms_prop_optimizer_step(indices,
grads,
local_embedding,
local_embedding_cacheline_tag_wm_tensor,
local_embedding_cacheline_data_wm_tensor,
optimizer_state->local_cachable_wm_tensor,
local_state_cacheline_tag_wm_tensor,
local_state_cacheline_data_wm_tensor,
optimizer_state->local_start_index,
cache_set_coverage,
weight_decay,
epsilon,
alpha,
lr,
stream));
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t create_embedding_optimizer(
wholememory_embedding_optimizer_t* optimizer,
wholememory_optimizer_type_t optimizer_type) noexcept
{
embedding_optimizer_impl_base* optimizer_impl = nullptr;
try {
switch (optimizer_type) {
case WHOLEMEMORY_OPT_SGD: {
optimizer_impl = new SGDEmbeddingOptimizer();
break;
}
case WHOLEMEMORY_OPT_LAZY_ADAM: {
optimizer_impl = new LazyAdamEmbeddingOptimizer();
break;
}
case WHOLEMEMORY_OPT_ADAGRAD: {
optimizer_impl = new AdaGradEmbeddingOptimizer();
break;
}
case WHOLEMEMORY_OPT_RMSPROP: {
optimizer_impl = new RMSPropEmbeddingOptimizer();
break;
}
default: {
return WHOLEMEMORY_NOT_IMPLEMENTED;
}
}
} catch (...) {
WHOLEMEMORY_ERROR("create optimizer failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
*optimizer = static_cast<wholememory_embedding_optimizer_t>(optimizer_impl);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t optimizer_set_parameter(wholememory_embedding_optimizer_t optimizer,
const char* parameter_name,
void* value) noexcept
{
if (optimizer == nullptr) {
WHOLEMEMORY_ERROR("Input optimizer is nullptr.");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* optimizer_impl = static_cast<embedding_optimizer_impl_base*>(optimizer);
return optimizer_impl->set_parameter(parameter_name, value);
}
void destroy_embedding_optimizer(wholememory_embedding_optimizer_t optimizer) noexcept
{
auto* optimizer_impl = static_cast<embedding_optimizer_impl_base*>(optimizer);
delete optimizer_impl;
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/initialize.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "initialize.hpp"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <nccl.h>
#include "communicator.hpp"
#include "cuda_macros.hpp"
#include "error.hpp"
#include "logger.hpp"
namespace wholememory {
static std::mutex mu;
static bool is_wm_init = false;
static const std::string RAFT_NAME = "wholememory";
static cudaDeviceProp* device_props = nullptr;
wholememory_error_code_t init(unsigned int flags) noexcept
{
try {
std::unique_lock<std::mutex> lock(mu);
(void)flags;
WHOLEMEMORY_EXPECTS(!is_wm_init, "WholeMemory has already been initialized.");
WM_CU_CHECK(cuInit(0));
int dev_count = 0;
WM_CUDA_CHECK(cudaGetDeviceCount(&dev_count));
if (dev_count <= 0) {
WHOLEMEMORY_ERROR("init failed, no CUDA device found");
return WHOLEMEMORY_CUDA_ERROR;
}
device_props = new cudaDeviceProp[dev_count];
for (int i = 0; i < dev_count; i++) {
WM_CUDA_CHECK(cudaGetDeviceProperties(device_props + i, i));
}
is_wm_init = true;
return WHOLEMEMORY_SUCCESS;
} catch (raft::logic_error& logic_error) {
WHOLEMEMORY_ERROR("init failed, logic_error=%s", logic_error.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("init failed, logic_error=%s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("init failed, cuda_error=%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (wholememory::cu_error& wce) {
WHOLEMEMORY_ERROR("init failed, cu_error=%s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
WHOLEMEMORY_ERROR("init failed, Unknown error.");
return WHOLEMEMORY_UNKNOW_ERROR;
}
}
wholememory_error_code_t finalize() noexcept
{
std::unique_lock<std::mutex> lock(mu);
is_wm_init = false;
WHOLEMEMORY_RETURN_ON_FAIL(destroy_all_communicators());
delete[] device_props;
device_props = nullptr;
return WHOLEMEMORY_SUCCESS;
}
cudaDeviceProp* get_device_prop(int dev_id) noexcept
{
try {
if (dev_id == -1) { WM_CUDA_CHECK(cudaGetDevice(&dev_id)); }
WHOLEMEMORY_CHECK(dev_id >= 0);
return device_props + dev_id;
} catch (...) {
WHOLEMEMORY_ERROR("get_device_prop for dev_id=%d failed.", dev_id);
return nullptr;
}
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/wholememory.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wholememory/wholememory.h>
#include "communicator.hpp"
#include "file_io.h"
#include "initialize.hpp"
#include "memory_handle.hpp"
#include "parallel_utils.hpp"
#ifdef __cplusplus
extern "C" {
#endif
wholememory_error_code_t wholememory_init(unsigned int flags) { return wholememory::init(flags); }
wholememory_error_code_t wholememory_finalize() { return wholememory::finalize(); }
wholememory_error_code_t wholememory_create_unique_id(wholememory_unique_id_t* unique_id)
{
return wholememory::create_unique_id(unique_id);
}
wholememory_error_code_t wholememory_create_communicator(wholememory_comm_t* comm,
wholememory_unique_id_t unique_id,
int rank,
int size)
{
return wholememory::create_communicator(comm, unique_id, rank, size);
}
wholememory_error_code_t wholememory_destroy_communicator(wholememory_comm_t comm)
{
return wholememory::destroy_communicator(comm);
}
wholememory_error_code_t wholememory_communicator_support_type_location(
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location)
{
return wholememory::communicator_support_type_location(comm, memory_type, memory_location);
}
wholememory_error_code_t wholememory_communicator_get_rank(int* rank, wholememory_comm_t comm)
{
return wholememory::communicator_get_rank(rank, comm);
}
wholememory_error_code_t wholememory_communicator_get_size(int* size, wholememory_comm_t comm)
{
return wholememory::communicator_get_size(size, comm);
}
wholememory_error_code_t wholememory_communicator_barrier(wholememory_comm_t comm)
{
wholememory::communicator_barrier(comm);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_malloc(wholememory_handle_t* wholememory_handle_ptr,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
{
return wholememory::create_wholememory(
wholememory_handle_ptr, total_size, comm, memory_type, memory_location, data_granularity);
}
wholememory_error_code_t wholememory_free(wholememory_handle_t wholememory_handle)
{
return wholememory::destroy_wholememory(wholememory_handle);
}
wholememory_error_code_t wholememory_get_communicator(wholememory_comm_t* comm,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_communicator_from_handle(comm, wholememory_handle);
}
wholememory_memory_type_t wholememory_get_memory_type(wholememory_handle_t wholememory_handle)
{
return wholememory::get_memory_type(wholememory_handle);
}
wholememory_memory_location_t wholememory_get_memory_location(
wholememory_handle_t wholememory_handle)
{
return wholememory::get_memory_location(wholememory_handle);
}
size_t wholememory_get_total_size(wholememory_handle_t wholememory_handle)
{
return wholememory::get_total_size(wholememory_handle);
}
size_t wholememory_get_data_granularity(wholememory_handle_t wholememory_handle)
{
return wholememory::get_data_granularity(wholememory_handle);
}
wholememory_error_code_t wholememory_get_local_memory(void** local_ptr,
size_t* local_size,
size_t* local_offset,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_local_memory_from_handle(
local_ptr, local_size, local_offset, wholememory_handle);
}
wholememory_error_code_t wholememory_get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_rank_memory_from_handle(
rank_memory_ptr, rank_memory_size, rank_memory_offset, rank, wholememory_handle);
}
wholememory_error_code_t wholememory_get_global_pointer(void** global_ptr,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_global_pointer_from_handle(global_ptr, wholememory_handle);
}
wholememory_error_code_t wholememory_get_global_reference(wholememory_gref_t* wholememory_gref,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_global_reference_from_handle(wholememory_gref, wholememory_handle);
}
wholememory_error_code_t wholememory_determine_partition_plan(size_t* size_per_rank,
size_t total_size,
size_t data_granularity,
int world_size)
{
return wholememory::determine_partition_plan(
size_per_rank, total_size, data_granularity, world_size);
}
wholememory_error_code_t wholememory_determine_entry_partition_plan(size_t* entry_per_rank,
size_t total_entry_count,
int world_size)
{
if (entry_per_rank == nullptr) { return WHOLEMEMORY_INVALID_INPUT; }
*entry_per_rank = wholememory::determine_entry_partition_plan(total_entry_count, world_size);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_get_partition_plan(size_t* size_per_rank,
wholememory_handle_t wholememory_handle)
{
return wholememory::get_partition_plan_from_handle(size_per_rank, wholememory_handle);
}
int fork_get_device_count()
{
try {
return ForkGetDeviceCount();
} catch (...) {
WHOLEMEMORY_ERROR("fork_get_device_count failed.");
return -1;
}
}
wholememory_error_code_t wholememory_load_from_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_size,
size_t file_entry_size,
const char** file_names,
int file_count)
{
return wholememory::load_file_to_handle(
wholememory_handle, memory_offset, memory_entry_size, file_entry_size, file_names, file_count);
}
wholememory_error_code_t wholememory_store_to_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t file_entry_size,
const char* local_file_name)
{
return wholememory::store_handle_to_file(
wholememory_handle, memory_offset, memory_entry_stride, file_entry_size, local_file_name);
}
wholememory_error_code_t wholememory_load_hdfs_support() { return WHOLEMEMORY_NOT_IMPLEMENTED; }
wholememory_error_code_t wholememory_load_from_hdfs_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_size,
size_t file_entry_size,
const char* hdfs_host,
int hdfs_port,
const char* hdfs_user,
const char* hdfs_path,
const char* hdfs_prefix)
{
return WHOLEMEMORY_NOT_IMPLEMENTED;
}
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/system_info.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "system_info.hpp"
#include <string>
#include "cuda_macros.hpp"
bool DevAttrPagebleMemoryAccess()
{
int current_dev_id = -1;
WM_CUDA_CHECK_NO_THROW(cudaGetDevice(¤t_dev_id));
int value = 0;
cudaDeviceAttr attr = cudaDevAttrPageableMemoryAccess;
WM_CUDA_CHECK_NO_THROW(cudaDeviceGetAttribute(&value, attr, current_dev_id));
return value > 0;
}
bool DeviceCanAccessPeer(int peer_device)
{
int current_dev_id = -1;
WM_CUDA_CHECK_NO_THROW(cudaGetDevice(¤t_dev_id));
int can_access = 0;
WM_CUDA_CHECK_NO_THROW(cudaDeviceCanAccessPeer(&can_access, current_dev_id, peer_device));
return can_access > 0;
}
bool DevicesCanAccessP2P(const int* dev_ids, int count)
{
if (count <= 1) return true;
int current_dev_id = -1;
WM_CUDA_CHECK_NO_THROW(cudaGetDevice(¤t_dev_id));
bool all_can_access = true;
for (int i = 0; i < count && all_can_access; i++) {
int src_dev = dev_ids[i];
WM_CUDA_CHECK_NO_THROW(cudaSetDevice(src_dev));
for (int j = 0; j < count; j++) {
if (j == i) continue;
int peer_dev = dev_ids[j];
int can_access = 0;
WM_CUDA_CHECK_NO_THROW(cudaDeviceCanAccessPeer(&can_access, src_dev, peer_dev));
if (can_access == 0) {
all_can_access = false;
break;
}
}
}
WM_CUDA_CHECK_NO_THROW(cudaSetDevice(current_dev_id));
return all_can_access;
}
int GetCudaCompCap()
{
int cuda_dev;
WM_CUDA_CHECK_NO_THROW(cudaGetDevice(&cuda_dev));
int cc_major, cc_minor;
WM_CUDA_CHECK_NO_THROW(
cudaDeviceGetAttribute(&cc_major, cudaDevAttrComputeCapabilityMajor, cuda_dev));
WM_CUDA_CHECK_NO_THROW(
cudaDeviceGetAttribute(&cc_minor, cudaDevAttrComputeCapabilityMinor, cuda_dev));
return cc_major * 10 + cc_minor;
}
const char* GetCPUArch()
{
#if defined(__PPC__)
static const char* arch_str = "ppc64";
#elif defined(__aarch64__)
static const char* arch_str = "arm64";
#elif defined(__x86_64__)
static const char* arch_str = "x86_64";
#endif
return arch_str;
}
bool SupportMNNVL()
{
// TODO: replace with NVML, nvmlDeviceGetGpuFabricInfo
return GetCudaCompCap() >= 90;
}
bool SupportEGM()
{
std::string const arch_str = GetCPUArch();
return arch_str == "arm64" && DevAttrPagebleMemoryAccess();
}
bool SupportMNNVLForEGM() { return SupportMNNVL() && SupportEGM(); }
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wholememory/embedding.h>
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory_op.h>
#include <memory>
#include "cuda_macros.hpp"
#include "embedding.hpp"
#include "embedding_optimizer.hpp"
#include "error.hpp"
#include "integer_utils.hpp"
#include "logger.hpp"
#include "wholememory_ops/functions/embedding_cache_func.h"
#include "wholememory_ops/functions/exchange_embeddings_nccl_func.h"
#include "wholememory_ops/functions/exchange_ids_nccl_func.h"
#include "wholememory_ops/functions/gather_cached_func.h"
#include "wholememory_ops/functions/gather_scatter_func.h"
#include "wholememory_ops/temp_memory_handle.hpp"
#include "wholememory_ops/thrust_allocator.hpp"
namespace wholememory {
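// Round embedding_dim up so that each row occupies a multiple of 16 bytes,
// keeping every embedding row 16-byte aligned.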
static int64_t align_embedding_dim(int64_t embedding_dim, size_t element_size)
{
int64_t const align_count = 16 / element_size;
int64_t const embedding_stride = embedding_dim % align_count == 0
? embedding_dim
: (embedding_dim / align_count + 1) * align_count;
return embedding_stride;
}
wholememory_error_code_t embedding_base::allocate(
wholememory_matrix_description_t* embedding_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_embedding_cache_policy_t policy,
wholememory_embedding_optimizer_t opt) noexcept
{
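  // Allocation flow: pick a cache implementation (if any) depending on whether the cache
  // communicator matches the embedding communicator, compute the padded/strided tensor
  // description, create the backing WholeMemory tensor, expose the user-visible shape as a
  // subtensor, then allocate the cache and optimizer states.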
cache_policy = policy;
optimizer = opt;
raw_embedding_comm_ = comm;
wholememory_tensor_description_t padded_embedding_tensor_description;
try {
if (optimizer != nullptr && embedding_description->dtype != WHOLEMEMORY_DT_FLOAT) {
WHOLEMEMORY_ERROR("Only float embedding supports training.");
return WHOLEMEMORY_NOT_IMPLEMENTED;
}
if (cache_policy != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(cache_policy->cache_comm != nullptr);
if (cache_policy->cache_comm != comm) {
cache_ptr_ = new wholememory::local_cache_for_global(cache_policy);
} else {
cache_ptr_ = new wholememory::device_cache_for_host(cache_policy);
}
WHOLEMEMORY_RETURN_ON_FAIL(
cache_ptr_->get_embedding_requirement(&padded_embedding_tensor_description,
*embedding_description,
comm,
memory_type,
memory_location));
} else {
wholememory_copy_matrix_desc_to_tensor(&padded_embedding_tensor_description,
embedding_description);
int64_t const embedding_dim = embedding_description->sizes[1];
size_t const element_size = wholememory_dtype_get_element_size(embedding_description->dtype);
int64_t const embedding_stride = align_embedding_dim(embedding_dim, element_size);
padded_embedding_tensor_description.storage_offset = 0;
padded_embedding_tensor_description.strides[0] = embedding_stride;
padded_embedding_tensor_description.strides[1] = 1;
}
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&allocated_embedding,
&padded_embedding_tensor_description,
comm,
memory_type,
memory_location));
int64_t starts[2] = {0, 0};
int64_t ends[2] = {embedding_description->sizes[0], embedding_description->sizes[1]};
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_get_subtensor(allocated_embedding, &starts[0], &ends[0], &user_embedding));
if (cache_ptr_ != nullptr) { WHOLEMEMORY_RETURN_ON_FAIL(cache_ptr_->allocate(user_embedding)); }
if (optimizer != nullptr) {
if (cache_policy != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(cache_policy->access_type == WHOLEMEMORY_AT_READWRITE);
}
optimizer_impl_base_ = static_cast<embedding_optimizer_impl_base*>(optimizer);
WHOLEMEMORY_RETURN_ON_FAIL(create_optimizer_states());
WHOLEMEMORY_RETURN_ON_FAIL(init_optimizer_states());
}
} catch (std::bad_alloc& sba) {
WHOLEMEMORY_ERROR("bad_alloc");
return WHOLEMEMORY_OUT_OF_MEMORY;
} catch (...) {
WHOLEMEMORY_ERROR("Unknown error");
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::gather_gradient_apply(wholememory_tensor_t indices,
wholememory_tensor_t grads,
bool adjust_cache,
float lr,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream)
{
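  // Overall flow:
  //  1. Bucket indices by owning rank and exchange them across ranks.
  //  2. Gather the matching gradient rows into a send buffer and exchange them (AllToAllV).
  //  3. Deduplicate the received indices and merge gradients of duplicate ids.
  //  4. Optionally refresh the embedding / optimizer-state caches for the touched ids.
  //  5. Run the optimizer step on this rank's local shard of the embedding.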
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* grads_desc = wholememory_tensor_get_tensor_description(grads);
auto* embedding_desc = wholememory_tensor_get_tensor_description(allocated_embedding);
WHOLEMEMORY_CHECK_NOTHROW(indice_desc->dim == 1);
wholememory_ops::temp_memory_handle host_recv_rank_id_count_handle(p_env_fns),
host_rank_id_count_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_recv_indices_buffer_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_raw_indice_handle(p_env_fns);
size_t const embedding_entry_count_per_rank =
wholememory_tensor_get_entry_per_partition(allocated_embedding);
wholememory_ops::wm_thrust_allocator thrust_allocator(p_env_fns);
int world_size = -1, world_rank = -1;
int64_t* host_recv_rank_id_count_ptr = nullptr;
int64_t* host_rank_id_count_ptr = nullptr;
int64_t* dev_raw_indice_ptr = nullptr;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, raw_embedding_comm_));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, raw_embedding_comm_));
host_recv_rank_id_count_ptr = static_cast<int64_t*>(
host_recv_rank_id_count_handle.pinned_malloc(world_size, WHOLEMEMORY_DT_INT64));
host_rank_id_count_ptr = static_cast<int64_t*>(
host_rank_id_count_handle.pinned_malloc(world_size, WHOLEMEMORY_DT_INT64));
dev_raw_indice_ptr = static_cast<int64_t*>(
dev_raw_indice_handle.device_malloc(indice_desc->sizes[0], WHOLEMEMORY_DT_INT64));
wholememory_array_description_t indice_array_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_array(&indice_array_desc, indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::bucket_and_exchange_ids_func(wholememory_tensor_get_data_pointer(indices),
indice_array_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
&dev_recv_indices_buffer_handle,
dev_raw_indice_ptr,
embedding_entry_count_per_rank,
raw_embedding_comm_,
&thrust_allocator,
p_env_fns,
stream));
int64_t total_recv_count = 0;
for (int rank_id = 0; rank_id < world_size; rank_id++) {
total_recv_count += host_recv_rank_id_count_ptr[rank_id];
}
wholememory_ops::temp_memory_handle temp_grad_send_buffer_handle(p_env_fns),
temp_grad_recv_buffer_handle(p_env_fns);
void* temp_grad_send_buffer = temp_grad_send_buffer_handle.device_malloc(
grads_desc->sizes[0] * grads_desc->sizes[1], grads_desc->dtype);
void* temp_grad_recv_buffer = temp_grad_recv_buffer_handle.device_malloc(
total_recv_count * grads_desc->sizes[1], grads_desc->dtype);
auto grads_gref =
wholememory_create_continuous_global_reference(wholememory_tensor_get_data_pointer(grads));
wholememory_matrix_description_t grads_mat_desc, temp_grad_send_desc;
WHOLEMEMORY_CHECK_NOTHROW(wholememory_convert_tensor_desc_to_matrix(&grads_mat_desc, grads_desc));
temp_grad_send_desc = grads_mat_desc;
temp_grad_send_desc.stride = temp_grad_send_desc.sizes[1];
wholememory_array_description_t raw_indice_desc = indice_array_desc;
raw_indice_desc.dtype = WHOLEMEMORY_DT_INT64;
raw_indice_desc.storage_offset = 0;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::gather_func(grads_gref,
grads_mat_desc,
dev_raw_indice_ptr,
raw_indice_desc,
temp_grad_send_buffer,
temp_grad_send_desc,
stream));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::exchange_embeddings_nccl_func(
temp_grad_send_buffer,
host_rank_id_count_ptr,
host_recv_rank_id_count_ptr,
temp_grad_recv_buffer,
grads_desc->sizes[1] * wholememory_dtype_get_element_size(grads_desc->dtype),
raw_embedding_comm_,
stream));
wholememory_ops::temp_memory_handle dedup_indice_recv_buffer_handle(p_env_fns);
wholememory_ops::temp_memory_handle dedup_grad_recv_buffer_handle(p_env_fns);
void* dedup_indice =
dedup_indice_recv_buffer_handle.device_malloc(total_recv_count, indice_desc->dtype);
float* dedup_grads = static_cast<float*>(dedup_grad_recv_buffer_handle.device_malloc(
total_recv_count * grads_desc->sizes[1], grads_desc->dtype));
wholememory_array_description_t recv_indice_array_desc = indice_array_desc;
recv_indice_array_desc.size = total_recv_count;
wholememory_matrix_description_t recv_grad_matrix_desc = grads_mat_desc;
recv_grad_matrix_desc.sizes[0] = total_recv_count;
recv_grad_matrix_desc.stride = grads_mat_desc.sizes[1];
int64_t const deduped_count =
wholememory_ops::dedup_indice_and_gradients(dev_recv_indices_buffer_handle.pointer(),
recv_indice_array_desc,
static_cast<const float*>(temp_grad_recv_buffer),
recv_grad_matrix_desc,
dedup_indice,
dedup_grads,
p_env_fns,
stream);
wholememory_array_description_t update_indice_desc = indice_array_desc;
update_indice_desc.size = deduped_count;
if (adjust_cache && cache_ptr_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(cache_ptr_ != nullptr);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::update_cache_direct_same_comm(dedup_indice,
update_indice_desc,
user_embedding,
cache_ptr_->get_cache_local_data(),
cache_ptr_->get_cache_set_coverage(),
p_env_fns,
stream));
}
auto* state_embedding = optimizer_state_->cachable_state_embedding;
if (adjust_cache && cache_ptr_ != nullptr && state_embedding != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(cache_ptr_ != nullptr);
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state_.get() != nullptr);
WHOLEMEMORY_CHECK_NOTHROW(state_embedding != nullptr);
embedding_base* state_embedding_base = static_cast<embedding_base*>(state_embedding);
WHOLEMEMORY_CHECK_NOTHROW(state_embedding_base->cache_ptr_ != nullptr);
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::update_cache_direct_same_comm(
dedup_indice,
update_indice_desc,
wholememory_embedding_get_embedding_tensor(state_embedding),
state_embedding_base->cache_ptr_->get_cache_local_data(),
state_embedding_base->cache_ptr_->get_cache_set_coverage(),
p_env_fns,
stream));
}
WHOLEMEMORY_CHECK_NOTHROW(optimizer_impl_base_ != nullptr);
wholememory_tensor_t dedup_indice_tensor, dedup_grad_tensor;
wholememory_tensor_description_t recv_indice_tensor_desc = *indice_desc;
recv_indice_tensor_desc.sizes[0] = deduped_count;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_make_tensor_from_pointer(
&dedup_indice_tensor, dedup_indice, &recv_indice_tensor_desc));
wholememory_tensor_description_t recv_grad_tensor_desc = *grads_desc;
recv_grad_tensor_desc.sizes[0] = deduped_count;
recv_grad_tensor_desc.strides[0] = recv_grad_tensor_desc.sizes[1];
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_make_tensor_from_pointer(&dedup_grad_tensor, dedup_grads, &recv_grad_tensor_desc));
wholememory_tensor_t local_embedding;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_map_local_tensor(user_embedding, &local_embedding));
WHOLEMEMORY_RETURN_ON_FAIL(optimizer_impl_base_->step(
dedup_indice_tensor, dedup_grad_tensor, local_embedding, optimizer_state_.get(), lr, stream));
wholememory_destroy_tensor(dedup_indice_tensor);
wholememory_destroy_tensor(dedup_grad_tensor);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::create_optimizer_states() noexcept
{
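  // Cachable optimizer states are packed side by side (each padded for alignment) into one
  // extra embedding that shares the raw embedding's cache policy; uncachable states are
  // created as plain distributed device tensors.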
wholememory_comm_t wm_raw_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(
&wm_raw_comm, wholememory_tensor_get_memory_handle(allocated_embedding)));
int world_rank, world_size;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, wm_raw_comm));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, wm_raw_comm));
auto* allocated_tensor_desc = wholememory_tensor_get_tensor_description(allocated_embedding);
auto* user_tensor_desc = wholememory_tensor_get_tensor_description(user_embedding);
int64_t start[2] = {0, 0};
int64_t end[2] = {user_tensor_desc->sizes[1], -1};
size_t entry_per_rank;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_determine_entry_partition_plan(
&entry_per_rank, allocated_tensor_desc->sizes[0], world_size));
optimizer_state_ = std::make_unique<optimizer_state_t>();
optimizer_state_->local_start_index = entry_per_rank * world_rank;
optimizer_impl_base_->create_optimizer_states(optimizer_state_.get(), user_tensor_desc->sizes[1]);
bool const need_cachable_states = !optimizer_state_->cachable_states.empty();
wholememory_tensor_description_t cachable_state_desc;
if (cache_ptr_ != nullptr) {
try {
optimizer_state_->device_cache_for_host_ = dynamic_cast<device_cache_for_host*>(cache_ptr_);
} catch (...) {
WHOLEMEMORY_FAIL_NOTHROW("cast from embedding_cache_base* to device_cache_for_host* failed.");
}
}
if (need_cachable_states) {
std::vector<int> embedding_offset(optimizer_state_->cachable_states.size(), 0);
size_t element_size = wholememory_dtype_get_element_size(user_tensor_desc->dtype);
int all_state_embedding_count = 0;
for (size_t i = 0; i < embedding_offset.size(); i++) {
auto& c_state = optimizer_state_->cachable_states[i];
embedding_offset[i] = all_state_embedding_count;
int state_embedding_dim = c_state.dim;
int aligned_embedding_dim = align_embedding_dim(state_embedding_dim, element_size);
all_state_embedding_count += aligned_embedding_dim;
}
cachable_state_desc = *user_tensor_desc;
cachable_state_desc.sizes[1] = all_state_embedding_count;
cachable_state_desc.strides[0] = all_state_embedding_count;
auto allocated_handle = wholememory_tensor_get_memory_handle(allocated_embedding);
auto memory_type = wholememory_get_memory_type(allocated_handle);
auto memory_location = wholememory_get_memory_location(allocated_handle);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_create_embedding(&optimizer_state_->cachable_state_embedding,
&cachable_state_desc,
raw_embedding_comm_,
memory_type,
memory_location,
nullptr,
cache_policy));
optimizer_state_->global_cachable_raw_user_tensor =
wholememory_embedding_get_embedding_tensor(optimizer_state_->cachable_state_embedding);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(optimizer_state_->global_cachable_raw_user_tensor,
&optimizer_state_->local_cachable_wm_tensor));
for (size_t i = 0; i < embedding_offset.size(); i++) {
auto& c_state = optimizer_state_->cachable_states[i];
c_state.start_dim = embedding_offset[i];
start[1] = embedding_offset[i];
end[1] = start[1] + c_state.dim;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_get_subtensor(optimizer_state_->global_cachable_raw_user_tensor,
start,
end,
&c_state.global_raw_state_tensor));
}
}
for (auto& uc_state : optimizer_state_->uncachable_states) {
auto uc_desc = *allocated_tensor_desc;
uc_desc.dtype = uc_state.dtype;
uc_desc.sizes[1] = uc_desc.strides[0] = uc_state.dim;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&uc_state.global_raw_padded_tensor,
&uc_desc,
wm_raw_comm,
WHOLEMEMORY_MT_DISTRIBUTED,
WHOLEMEMORY_ML_DEVICE));
start[0] = 0;
start[1] = 0;
end[0] = user_tensor_desc->sizes[0];
end[1] = uc_state.dim;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_subtensor(
uc_state.global_raw_padded_tensor, start, end, &uc_state.global_raw_sub_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(uc_state.global_raw_sub_tensor, &uc_state.local_tensor));
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::destroy_optimizer_states() noexcept
{
for (auto& c_state : optimizer_state_->cachable_states) {
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(c_state.global_raw_state_tensor));
}
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_destroy_tensor(optimizer_state_->local_cachable_wm_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_destroy_tensor(optimizer_state_->global_cachable_raw_user_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_destroy_embedding(optimizer_state_->cachable_state_embedding));
optimizer_state_->cachable_states.clear();
for (auto& uc_state : optimizer_state_->uncachable_states) {
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(uc_state.local_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(uc_state.global_raw_sub_tensor));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(uc_state.global_raw_padded_tensor));
}
return WHOLEMEMORY_SUCCESS;
}
void embedding_base::deallocate() noexcept
{
if (optimizer != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(destroy_optimizer_states() == WHOLEMEMORY_SUCCESS);
}
if (cache_ptr_ != nullptr) {
delete cache_ptr_;
cache_ptr_ = nullptr;
}
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(user_embedding) == WHOLEMEMORY_SUCCESS);
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(allocated_embedding) == WHOLEMEMORY_SUCCESS);
}
wholememory_error_code_t embedding_base::writeback_embedding_cache(
cudaStream_t stream) const noexcept
{
if (cache_ptr_ != nullptr) {
WHOLEMEMORY_RETURN_ON_FAIL(cache_ptr_->writeback_all_cache(stream));
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::writeback_all_caches(cudaStream_t stream) const noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(writeback_embedding_cache(stream));
if (optimizer_impl_base_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state_.get() != nullptr);
if (optimizer_state_->cachable_state_embedding != nullptr) {
WHOLEMEMORY_RETURN_ON_FAIL(
static_cast<embedding_base*>(optimizer_state_->cachable_state_embedding)
->writeback_all_caches(stream));
}
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::drop_embedding_cache(cudaStream_t stream) const noexcept
{
if (cache_ptr_ != nullptr) { WHOLEMEMORY_RETURN_ON_FAIL(cache_ptr_->drop_all_cache(stream)); }
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_base::drop_all_caches(cudaStream_t stream) const noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(drop_embedding_cache(stream));
if (optimizer_impl_base_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(optimizer_state_.get() != nullptr);
if (optimizer_state_->cachable_state_embedding != nullptr) {
WHOLEMEMORY_RETURN_ON_FAIL(
static_cast<embedding_base*>(optimizer_state_->cachable_state_embedding)
->drop_all_caches(stream));
}
}
return WHOLEMEMORY_SUCCESS;
}
class noncached_embedding : public embedding_base {
public:
noncached_embedding() = default;
virtual ~noncached_embedding() = default;
wholememory_error_code_t gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept override;
};
wholememory_error_code_t noncached_embedding::gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_gather(allocated_embedding, indices, output, p_env_fns, stream));
return WHOLEMEMORY_SUCCESS;
}
class device_cached_host_embedding : public embedding_base {
public:
device_cached_host_embedding() = default;
virtual ~device_cached_host_embedding() = default;
wholememory_error_code_t gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept override;
};
wholememory_error_code_t device_cached_host_embedding::gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept
{
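  // Two paths: when the cache is WHOLEMEMORY_MT_DISTRIBUTED (or the cache must be adjusted),
  // indices are bucketed and exchanged so each rank serves its own partition from its local
  // cache/raw data, results are exchanged back (AllToAllV) and scattered into the requested
  // order; otherwise the output is gathered directly through the globally addressable cache.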
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* output_desc = wholememory_tensor_get_tensor_description(output);
auto* embedding_desc = wholememory_tensor_get_tensor_description(allocated_embedding);
WHOLEMEMORY_CHECK_NOTHROW(indice_desc->dim == 1);
wholememory_ops::temp_memory_handle host_recv_rank_id_count_handle(p_env_fns),
host_rank_id_count_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_recv_indices_buffer_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_raw_indice_handle(p_env_fns);
size_t const embedding_entry_count_per_rank =
wholememory_tensor_get_entry_per_partition(allocated_embedding);
wholememory_ops::wm_thrust_allocator thrust_allocator(p_env_fns);
int world_size = -1, world_rank = -1;
int64_t* host_recv_rank_id_count_ptr = nullptr;
int64_t* host_rank_id_count_ptr = nullptr;
int64_t* dev_raw_indice_ptr = nullptr;
int64_t total_recv_count = 0;
if (adjust_cache || cache_policy->cache_memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, raw_embedding_comm_));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, raw_embedding_comm_));
host_recv_rank_id_count_ptr = static_cast<int64_t*>(
host_recv_rank_id_count_handle.pinned_malloc(world_size, WHOLEMEMORY_DT_INT64));
host_rank_id_count_ptr = static_cast<int64_t*>(
host_rank_id_count_handle.pinned_malloc(world_size, WHOLEMEMORY_DT_INT64));
dev_raw_indice_ptr = static_cast<int64_t*>(
dev_raw_indice_handle.device_malloc(indice_desc->sizes[0], WHOLEMEMORY_DT_INT64));
wholememory_array_description_t indice_array_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_array(&indice_array_desc, indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::bucket_and_exchange_ids_func(wholememory_tensor_get_data_pointer(indices),
indice_array_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
&dev_recv_indices_buffer_handle,
dev_raw_indice_ptr,
embedding_entry_count_per_rank,
raw_embedding_comm_,
&thrust_allocator,
p_env_fns,
stream));
if (adjust_cache) {
total_recv_count = 0;
for (int i = 0; i < world_size; i++) {
total_recv_count += host_recv_rank_id_count_ptr[i];
}
auto update_indice_desc =
wholememory_create_array_desc(total_recv_count, 0, indice_desc->dtype);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::update_cache_direct_same_comm(dev_recv_indices_buffer_handle.pointer(),
update_indice_desc,
allocated_embedding,
cache_ptr_->get_cache_local_data(),
cache_ptr_->get_cache_set_coverage(),
p_env_fns,
stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(cache_policy->cache_comm));
}
}
if (cache_policy->cache_memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
// Local Gather
total_recv_count = 0;
for (int i = 0; i < world_size; i++) {
total_recv_count += host_recv_rank_id_count_ptr[i];
}
wholememory_ops::temp_memory_handle dev_local_gather_buffer(p_env_fns);
wholememory_ops::temp_memory_handle dev_embedding_recv_buffer(p_env_fns);
void* dev_local_gather_buffer_ptr = dev_local_gather_buffer.device_malloc(
embedding_desc->sizes[1] * total_recv_count, output_desc->dtype);
void* dev_embedding_recv_buffer_ptr = dev_embedding_recv_buffer.device_malloc(
embedding_desc->sizes[1] * indice_desc->sizes[0], output_desc->dtype);
wholememory_tensor_t local_raw_tensor;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(allocated_embedding, &local_raw_tensor));
wholememory_gref_t local_raw_gref;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_get_global_reference(local_raw_tensor, &local_raw_gref));
wholememory_tensor_t cached_embedding_local_tensor =
cache_ptr_->get_cache_local_data()->cache_line_data_;
wholememory_gref_t cached_embedding_gref;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(
cached_embedding_local_tensor, &cached_embedding_gref));
wholememory_gref_t cache_line_tag_gref;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(
cache_ptr_->get_cache_local_data()->cache_line_tag_, &cache_line_tag_gref));
int64_t const rank_start_gid =
wholememory_tensor_get_entry_per_partition(allocated_embedding) * world_rank;
wholememory_tensor_description_t recv_indices_desc;
auto recv_indices_array_desc =
wholememory_create_array_desc(total_recv_count, 0, indice_desc->dtype);
wholememory_copy_array_desc_to_tensor(&recv_indices_desc, &recv_indices_array_desc);
wholememory_tensor_description_t local_gather_desc = *output_desc;
local_gather_desc.sizes[0] = total_recv_count;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::gather_cached_func(
local_raw_gref,
wholememory_tensor_get_tensor_description(local_raw_tensor),
cached_embedding_gref,
wholememory_tensor_get_tensor_description(cached_embedding_local_tensor),
cache_line_tag_gref,
dev_recv_indices_buffer_handle.pointer(),
&recv_indices_desc,
dev_local_gather_buffer_ptr,
&local_gather_desc,
cache_ptr_->get_cache_set_coverage(),
rank_start_gid,
rank_start_gid,
stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(local_raw_tensor));
// AllToAllV
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(
&wm_comm, wholememory_tensor_get_memory_handle(allocated_embedding)));
size_t const embedding_size =
embedding_desc->sizes[1] * wholememory_dtype_get_element_size(output_desc->dtype);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::exchange_embeddings_nccl_func(dev_local_gather_buffer_ptr,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
dev_embedding_recv_buffer_ptr,
embedding_size,
wm_comm,
stream));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
// Local reorder
wholememory_gref_t output_gref =
wholememory_create_continuous_global_reference(wholememory_tensor_get_data_pointer(output));
wholememory_matrix_description_t local_recv_buffer_desc = wholememory_create_matrix_desc(
output_desc->sizes, output_desc->sizes[1], 0, output_desc->dtype);
auto raw_indice_desc =
wholememory_create_array_desc(indice_desc->sizes[0], 0, WHOLEMEMORY_DT_INT64);
int64_t total_need_scatter_count = 0;
for (int i = 0; i < world_size; i++) {
total_need_scatter_count += host_rank_id_count_ptr[i];
}
local_recv_buffer_desc.sizes[0] = total_need_scatter_count;
raw_indice_desc.size = total_need_scatter_count;
wholememory_matrix_description_t output_matrix_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_matrix(&output_matrix_desc, output_desc));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::scatter_func(dev_embedding_recv_buffer_ptr,
local_recv_buffer_desc,
dev_raw_indice_ptr,
raw_indice_desc,
output_gref,
output_matrix_desc,
stream));
WM_CUDA_DEBUG_SYNC_STREAM(stream);
} else {
wholememory_gref_t global_raw_gref, global_cached_gref, global_cached_line_tag_gref;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_get_global_reference(allocated_embedding, &global_raw_gref));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(
cache_ptr_->cache_line_data_wm_tensor_, &global_cached_gref));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(
cache_ptr_->cache_line_tag_wm_tensor_, &global_cached_line_tag_gref));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::gather_cached_func(
global_raw_gref,
wholememory_tensor_get_tensor_description(allocated_embedding),
global_cached_gref,
wholememory_tensor_get_tensor_description(cache_ptr_->cache_line_data_wm_tensor_),
global_cached_line_tag_gref,
wholememory_tensor_get_data_pointer(indices),
indice_desc,
wholememory_tensor_get_data_pointer(output),
output_desc,
cache_ptr_->get_cache_set_coverage(),
0,
0,
stream));
}
return WHOLEMEMORY_SUCCESS;
}
class local_cached_global_readonly_embedding : public embedding_base {
public:
local_cached_global_readonly_embedding() = default;
virtual ~local_cached_global_readonly_embedding() = default;
wholememory_error_code_t gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept override;
};
wholememory_error_code_t local_cached_global_readonly_embedding::gather(
wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept
{
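  // When adjust_cache is set, the requested ids are exchanged over the cache communicator and
  // the local cache is refreshed from the raw embedding. The gather then serves cache hits via
  // try_gather_cached_func, which also records the missed ids; the misses are fetched directly
  // from the raw (non-cached) embedding.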
WHOLEMEMORY_CHECK_NOTHROW(cache_policy->cache_memory_type != WHOLEMEMORY_MT_DISTRIBUTED);
auto* indice_desc = wholememory_tensor_get_tensor_description(indices);
auto* output_desc = wholememory_tensor_get_tensor_description(output);
WHOLEMEMORY_CHECK_NOTHROW(indice_desc->dim == 1);
wholememory_ops::temp_memory_handle host_recv_rank_id_count_handle(p_env_fns),
host_rank_id_count_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_recv_indices_buffer_handle(p_env_fns);
wholememory_ops::temp_memory_handle dev_raw_indice_handle(p_env_fns);
size_t const embedding_entry_count_per_rank =
wholememory_tensor_get_entry_per_partition(cache_ptr_->access_count_wm_tensor_);
wholememory_ops::wm_thrust_allocator thrust_allocator(p_env_fns);
int cache_world_size = -1, cache_world_rank = -1;
int64_t* host_recv_rank_id_count_ptr = nullptr;
int64_t* host_rank_id_count_ptr = nullptr;
int64_t* dev_raw_indice_ptr = nullptr;
int64_t total_recv_count = 0;
  // NOTE: a WHOLEMEMORY_MT_DISTRIBUTED cache memory type is not supported here.
if (adjust_cache) {
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_communicator_get_size(&cache_world_size, cache_policy->cache_comm));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_communicator_get_rank(&cache_world_rank, cache_policy->cache_comm));
host_recv_rank_id_count_ptr = static_cast<int64_t*>(
host_recv_rank_id_count_handle.pinned_malloc(cache_world_size, WHOLEMEMORY_DT_INT64));
host_rank_id_count_ptr = static_cast<int64_t*>(
host_rank_id_count_handle.pinned_malloc(cache_world_size, WHOLEMEMORY_DT_INT64));
dev_raw_indice_ptr = static_cast<int64_t*>(
dev_raw_indice_handle.device_malloc(indice_desc->sizes[0], WHOLEMEMORY_DT_INT64));
wholememory_array_description_t indice_array_desc;
WHOLEMEMORY_CHECK_NOTHROW(
wholememory_convert_tensor_desc_to_array(&indice_array_desc, indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::bucket_and_exchange_ids_func(wholememory_tensor_get_data_pointer(indices),
indice_array_desc,
host_recv_rank_id_count_ptr,
host_rank_id_count_ptr,
&dev_recv_indices_buffer_handle,
dev_raw_indice_ptr,
embedding_entry_count_per_rank,
cache_policy->cache_comm,
&thrust_allocator,
p_env_fns,
stream));
// adjust cache
{
total_recv_count = 0;
for (int i = 0; i < cache_world_size; i++) {
total_recv_count += host_recv_rank_id_count_ptr[i];
}
auto update_indice_desc =
wholememory_create_array_desc(total_recv_count, 0, indice_desc->dtype);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_ops::update_cache_different_comm(dev_recv_indices_buffer_handle.pointer(),
update_indice_desc,
allocated_embedding,
cache_policy->cache_comm,
embedding_entry_count_per_rank,
cache_ptr_->get_cache_local_data(),
cache_ptr_->get_cache_set_coverage(),
p_env_fns,
stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(cache_policy->cache_comm));
}
}
wholememory_gref_t cached_gref, cached_line_tag_gref;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_get_global_reference(cache_ptr_->cache_line_data_wm_tensor_, &cached_gref));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_get_global_reference(
cache_ptr_->cache_line_tag_wm_tensor_, &cached_line_tag_gref));
wholememory_ops::temp_memory_handle dev_miss_ids_handle(p_env_fns);
void* dev_miss_ids_ptr =
dev_miss_ids_handle.device_malloc(indice_desc->sizes[0], indice_desc->dtype);
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::try_gather_cached_func(
cached_gref,
wholememory_tensor_get_tensor_description(cache_ptr_->cache_line_data_wm_tensor_),
cached_line_tag_gref,
wholememory_tensor_get_data_pointer(indices),
indice_desc,
nullptr,
dev_miss_ids_ptr,
wholememory_tensor_get_data_pointer(output),
output_desc,
cache_ptr_->get_cache_set_coverage(),
0,
stream));
wholememory_tensor_t missed_indices_tensor;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_make_tensor_from_pointer(&missed_indices_tensor, dev_miss_ids_ptr, indice_desc));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_gather(allocated_embedding, missed_indices_tensor, output, p_env_fns, stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(missed_indices_tensor));
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory
#ifdef __cplusplus
extern "C" {
#endif
wholememory_error_code_t wholememory_create_embedding_optimizer(
wholememory_embedding_optimizer_t* optimizer, wholememory_optimizer_type_t optimizer_type)
{
return wholememory::create_embedding_optimizer(optimizer, optimizer_type);
}
wholememory_error_code_t wholememory_optimizer_set_parameter(
wholememory_embedding_optimizer_t optimizer, const char* parameter_name, void* value)
{
return wholememory::optimizer_set_parameter(optimizer, parameter_name, value);
}
void wholememory_destroy_embedding_optimizer(wholememory_embedding_optimizer_t optimizer)
{
wholememory::destroy_embedding_optimizer(optimizer);
}
wholememory_error_code_t wholememory_create_embedding_cache_policy(
wholememory_embedding_cache_policy_t* cache_policy,
wholememory_comm_t cache_level_comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_access_type_t access_type,
float cache_ratio)
{
if (cache_ratio > 1.0F || cache_ratio < 1.0F / 512) {
WHOLEMEMORY_ERROR("cache_ratio should in range [1/512, 1.0]");
return WHOLEMEMORY_INVALID_VALUE;
}
auto* embedding_cache_policy = new wholememory_embedding_cache_policy_;
embedding_cache_policy->cache_comm = cache_level_comm;
embedding_cache_policy->cache_memory_type = memory_type;
embedding_cache_policy->cache_memory_location = memory_location;
embedding_cache_policy->access_type = access_type;
embedding_cache_policy->cache_ratio = cache_ratio;
*cache_policy = embedding_cache_policy;
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_destroy_embedding_cache_policy(
wholememory_embedding_cache_policy_t cache_policy)
{
delete cache_policy;
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_create_embedding(
wholememory_embedding_t* wholememory_embedding,
wholememory_tensor_description_t* embedding_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_embedding_optimizer_t optimizer,
wholememory_embedding_cache_policy_t cache_policy)
{
wholememory_matrix_description_t embedding_matrix_description;
if (!wholememory_convert_tensor_desc_to_matrix(&embedding_matrix_description,
embedding_description)) {
WHOLEMEMORY_ERROR("wholememory_create_embedding input description must be 2D matrix");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory::embedding_base* embedding_impl_ptr = nullptr;
int embedding_world_size = 1;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&embedding_world_size, comm));
if (cache_policy != nullptr) {
if (cache_policy->cache_comm == comm) {
if (cache_policy->cache_memory_location != WHOLEMEMORY_ML_DEVICE) {
WHOLEMEMORY_ERROR(
"Cache has same communicator with raw embedding, should be device cached host embedding,"
" but cache memory location is not WHOLEMEMORY_ML_DEVICE.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (cache_policy->cache_memory_type < memory_type) {
WHOLEMEMORY_ERROR(
"For device cached host memory, raw embedding should cover cache's address modes.");
return WHOLEMEMORY_INVALID_INPUT;
}
embedding_impl_ptr = new wholememory::device_cached_host_embedding();
} else {
      int cache_world_size = 1;
      WHOLEMEMORY_RETURN_ON_FAIL(
        wholememory_communicator_get_size(&cache_world_size, cache_policy->cache_comm));
WHOLEMEMORY_CHECK_NOTHROW(cache_world_size <= embedding_world_size);
if (cache_policy->cache_memory_type == WHOLEMEMORY_MT_DISTRIBUTED) {
WHOLEMEMORY_ERROR(
"For local cached global readonly embedding, cache_memory_type should be chunked or "
"continuous.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (cache_policy->access_type != WHOLEMEMORY_AT_READONLY) {
WHOLEMEMORY_ERROR(
"Only ReadOnly access type supported for local cached global readonly embedding.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (optimizer != nullptr) {
WHOLEMEMORY_ERROR("optimizer not supported for local cached global readonly embedding.");
return WHOLEMEMORY_INVALID_INPUT;
}
embedding_impl_ptr = new wholememory::local_cached_global_readonly_embedding();
}
} else {
embedding_impl_ptr = new wholememory::noncached_embedding();
}
WHOLEMEMORY_RETURN_ON_FAIL(embedding_impl_ptr->allocate(
&embedding_matrix_description, comm, memory_type, memory_location, cache_policy, optimizer));
*wholememory_embedding = static_cast<wholememory_embedding_t>(embedding_impl_ptr);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_destroy_embedding(
wholememory_embedding_t wholememory_embedding)
{
if (wholememory_embedding == nullptr) { return WHOLEMEMORY_INVALID_INPUT; }
auto* embedding_impl_ptr = static_cast<wholememory::embedding_base*>(wholememory_embedding);
delete embedding_impl_ptr;
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_embedding_gather(wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
int64_t stream_int)
{
auto* embedding_impl_ptr = static_cast<wholememory::embedding_base*>(wholememory_embedding);
return embedding_impl_ptr->gather(
indices, output, adjust_cache, p_env_fns, (cudaStream_t)stream_int);
}
wholememory_error_code_t wholememory_embedding_gather_gradient_apply(
wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t grads,
bool adjust_cache,
float lr,
wholememory_env_func_t* p_env_fns,
int64_t stream_int)
{
auto* embedding_impl_ptr = static_cast<wholememory::embedding_base*>(wholememory_embedding);
return embedding_impl_ptr->gather_gradient_apply(
indices, grads, adjust_cache, lr, p_env_fns, (cudaStream_t)stream_int);
}
wholememory_tensor_t wholememory_embedding_get_embedding_tensor(
wholememory_embedding_t wholememory_embedding)
{
wholememory::embedding_base* embedding_impl_ptr =
static_cast<wholememory::embedding_base*>(wholememory_embedding);
return embedding_impl_ptr->user_embedding;
}
const char* const* wholememory_embedding_get_optimizer_state_names(
wholememory_embedding_t wholememory_embedding)
{
wholememory::embedding_base* embedding_impl_ptr =
static_cast<wholememory::embedding_base*>(wholememory_embedding);
return embedding_impl_ptr->get_optimizer_state_names();
}
wholememory_tensor_t wholememory_embedding_get_optimizer_state(
wholememory_embedding_t wholememory_embedding, const char* name)
{
wholememory::embedding_base* embedding_impl_ptr =
static_cast<wholememory::embedding_base*>(wholememory_embedding);
return embedding_impl_ptr->get_optimizer_state(name);
}
wholememory_error_code_t wholememory_embedding_writeback_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int)
{
cudaStream_t stream = reinterpret_cast<cudaStream_t>(stream_int);
return static_cast<wholememory::embedding_base*>(wholememory_embedding)
->writeback_all_caches(stream);
}
wholememory_error_code_t wholememory_embedding_drop_all_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int)
{
cudaStream_t stream = reinterpret_cast<cudaStream_t>(stream_int);
return static_cast<wholememory::embedding_base*>(wholememory_embedding)->drop_all_caches(stream);
}
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/wholememory_tensor.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wholememory/wholememory_tensor.h"
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include "logger.hpp"
#ifdef WM_TENSOR_COUNT_DEBUG
static std::atomic<int64_t> wm_tensor_count;
static void inc_tensor_count() { wm_tensor_count.fetch_add(1); }
static void dec_tensor_count() { wm_tensor_count.fetch_add(-1); }
static int64_t get_tensor_count() { return wm_tensor_count.load(); }
#else
static void inc_tensor_count() {}
static void dec_tensor_count() {}
static int64_t get_tensor_count() { return 0; }
#endif
#ifdef __cplusplus
extern "C" {
#endif
struct wholememory_tensor_ {
union {
wholememory_handle_t wholememory_handle;
void* storage_ptr;
};
wholememory_tensor_description_t tensor_description;
wholememory_tensor_t root_tensor;
bool is_wholememory;
bool own_handle;
};
int64_t get_wholememory_tensor_count() { return get_tensor_count(); }
wholememory_error_code_t wholememory_create_tensor(
wholememory_tensor_t* p_wholememory_tensor,
wholememory_tensor_description_t* tensor_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location)
{
if (p_wholememory_tensor == nullptr) {
WHOLEMEMORY_ERROR("p_wholememory_tensor is nullptr");
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description == nullptr) {
WHOLEMEMORY_ERROR("tensor_description is nullptr");
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->dim <= 0 || tensor_description->dim > 2) {
WHOLEMEMORY_ERROR("tensor_description->dim=%d", tensor_description->dim);
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->storage_offset != 0) {
WHOLEMEMORY_ERROR("tensor_description->storage_offset=%ld", tensor_description->storage_offset);
return WHOLEMEMORY_INVALID_INPUT;
}
int const dim = tensor_description->dim;
if (tensor_description->strides[dim - 1] != 1) {
WHOLEMEMORY_ERROR("tensor_description->strides[dim - 1]", tensor_description->strides[dim - 1]);
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->dtype <= WHOLEMEMORY_DT_UNKNOWN ||
tensor_description->dtype >= WHOLEMEMORY_DT_COUNT) {
WHOLEMEMORY_ERROR("tensor_description is unknown");
return WHOLEMEMORY_INVALID_INPUT;
}
size_t elt_count = wholememory_get_memory_element_count_from_tensor(tensor_description);
size_t elt_size = wholememory_dtype_get_element_size(tensor_description->dtype);
size_t malloc_size = elt_count * elt_size;
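  // Use one full row (strides[0] elements) as the allocation granularity so that the
  // per-rank partitioning stays row-aligned.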
size_t granularity = elt_size * tensor_description->strides[0];
auto* wholememory_tensor = static_cast<wholememory_tensor_*>(malloc(sizeof(wholememory_tensor_)));
wholememory_tensor->tensor_description = *tensor_description;
wholememory_tensor->own_handle = true;
wholememory_tensor->is_wholememory = true;
wholememory_tensor->root_tensor = wholememory_tensor;
*p_wholememory_tensor = wholememory_tensor;
auto ret_code = wholememory_malloc(&wholememory_tensor->wholememory_handle,
malloc_size,
comm,
memory_type,
memory_location,
granularity);
  inc_tensor_count();
  if (ret_code != WHOLEMEMORY_SUCCESS) {
    free(wholememory_tensor);
    *p_wholememory_tensor = nullptr;
    dec_tensor_count();
  }
  return ret_code;
}
wholememory_error_code_t wholememory_destroy_tensor(wholememory_tensor_t wholememory_tensor)
{
if (wholememory_tensor->own_handle) {
if (wholememory_tensor->is_wholememory) {
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_free(wholememory_tensor->wholememory_handle));
} else {
free(wholememory_tensor->storage_ptr);
}
}
dec_tensor_count();
free(wholememory_tensor);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_make_tensor_from_pointer(
wholememory_tensor_t* p_wholememory_tensor,
void* storage_ptr,
wholememory_tensor_description_t* tensor_description)
{
  if (p_wholememory_tensor == nullptr || tensor_description == nullptr) {
    return WHOLEMEMORY_INVALID_INPUT;
  }
  if (storage_ptr == nullptr || tensor_description->dim == 0) {
    auto* wholememory_tensor =
      static_cast<wholememory_tensor_*>(malloc(sizeof(wholememory_tensor_)));
    wholememory_tensor->storage_ptr = storage_ptr;
    wholememory_tensor->tensor_description = *tensor_description;
    wholememory_tensor->own_handle = false;
    wholememory_tensor->is_wholememory = false;
    wholememory_tensor->root_tensor = wholememory_tensor;
    *p_wholememory_tensor = wholememory_tensor;
    inc_tensor_count();
    return WHOLEMEMORY_SUCCESS;
  }
if (tensor_description->dim < 0) {
WHOLEMEMORY_ERROR("tensor_description->dim=%d", tensor_description->dim);
return WHOLEMEMORY_INVALID_INPUT;
}
int const dim = tensor_description->dim;
if (tensor_description->strides[dim - 1] != 1) {
WHOLEMEMORY_ERROR("tensor_description->strides[dim - 1]", tensor_description->strides[dim - 1]);
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->dtype <= WHOLEMEMORY_DT_UNKNOWN ||
tensor_description->dtype >= WHOLEMEMORY_DT_COUNT) {
WHOLEMEMORY_ERROR("tensor_description is unknown");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* wholememory_tensor = static_cast<wholememory_tensor_*>(malloc(sizeof(wholememory_tensor_)));
wholememory_tensor->storage_ptr = storage_ptr;
wholememory_tensor->tensor_description = *tensor_description;
wholememory_tensor->own_handle = false;
wholememory_tensor->is_wholememory = false;
wholememory_tensor->root_tensor = wholememory_tensor;
*p_wholememory_tensor = wholememory_tensor;
inc_tensor_count();
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_make_tensor_from_handle(
wholememory_tensor_t* p_wholememory_tensor,
wholememory_handle_t wholememory_handle,
wholememory_tensor_description_t* tensor_description)
{
if (wholememory_handle == nullptr || p_wholememory_tensor == nullptr ||
tensor_description == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->dim <= 0 || tensor_description->dim > 2) {
WHOLEMEMORY_ERROR("tensor_description->dim=%d", tensor_description->dim);
return WHOLEMEMORY_INVALID_INPUT;
}
int const dim = tensor_description->dim;
if (tensor_description->strides[dim - 1] != 1) {
WHOLEMEMORY_ERROR("tensor_description->strides[dim - 1]", tensor_description->strides[dim - 1]);
return WHOLEMEMORY_INVALID_INPUT;
}
if (tensor_description->dtype <= WHOLEMEMORY_DT_UNKNOWN ||
tensor_description->dtype >= WHOLEMEMORY_DT_COUNT) {
WHOLEMEMORY_ERROR("tensor_description is unknown");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* wholememory_tensor = static_cast<wholememory_tensor_*>(malloc(sizeof(wholememory_tensor_)));
wholememory_tensor->wholememory_handle = wholememory_handle;
wholememory_tensor->tensor_description = *tensor_description;
wholememory_tensor->own_handle = false;
wholememory_tensor->is_wholememory = true;
wholememory_tensor->root_tensor = wholememory_tensor;
*p_wholememory_tensor = wholememory_tensor;
inc_tensor_count();
return WHOLEMEMORY_SUCCESS;
}
bool wholememory_tensor_has_handle(wholememory_tensor_t wholememory_tensor)
{
return wholememory_tensor->is_wholememory;
}
wholememory_handle_t wholememory_tensor_get_memory_handle(wholememory_tensor_t wholememory_tensor)
{
if (wholememory_tensor->is_wholememory) { return wholememory_tensor->wholememory_handle; }
return nullptr;
}
wholememory_tensor_description_t* wholememory_tensor_get_tensor_description(
wholememory_tensor_t wholememory_tensor)
{
return &wholememory_tensor->tensor_description;
}
wholememory_error_code_t wholememory_tensor_get_global_reference(
wholememory_tensor_t wholememory_tensor, wholememory_gref_t* wholememory_gref)
{
if (wholememory_gref == nullptr || wholememory_tensor == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
if (wholememory_tensor->is_wholememory) {
return wholememory_get_global_reference(wholememory_gref,
wholememory_tensor->wholememory_handle);
}
*wholememory_gref =
wholememory_create_continuous_global_reference(wholememory_tensor->storage_ptr);
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t wholememory_tensor_map_local_tensor(
wholememory_tensor_t wholememory_tensor, wholememory_tensor_t* local_tensor)
{
  // NOTE: wholememory_tensor must NOT skip entries at the front, but may skip entries at the tail.
if (local_tensor == nullptr || wholememory_tensor == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
if (!wholememory_tensor->is_wholememory) { return WHOLEMEMORY_INVALID_VALUE; }
auto* wm_desc = wholememory_tensor_get_tensor_description(wholememory_tensor);
if (wm_desc->dim != 1 && wm_desc->dim != 2) { return WHOLEMEMORY_INVALID_VALUE; }
if (wm_desc->dim == 1 && wm_desc->storage_offset != 0) { return WHOLEMEMORY_INVALID_VALUE; }
if (wm_desc->dim == 2 && wm_desc->storage_offset + wm_desc->sizes[1] > wm_desc->strides[0]) {
return WHOLEMEMORY_INVALID_VALUE;
}
wholememory_comm_t wm_comm;
int world_rank;
void* local_ptr;
size_t local_size, local_offset;
auto* handle = wholememory_tensor_get_memory_handle(wholememory_tensor);
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&wm_comm, handle));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_rank(&world_rank, wm_comm));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_get_local_memory(&local_ptr, &local_size, &local_offset, handle));
size_t const element_size = wholememory_dtype_get_element_size(wm_desc->dtype);
size_t const gran_size = wm_desc->dim == 1 ? element_size : element_size * wm_desc->strides[0];
size_t size_per_rank;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_partition_plan(&size_per_rank, handle));
WHOLEMEMORY_CHECK_NOTHROW(size_per_rank % gran_size == 0);
size_t entry_per_rank = size_per_rank / gran_size;
int64_t local_start = std::min<int64_t>(entry_per_rank * world_rank, wm_desc->sizes[0]);
int64_t local_end = std::min<int64_t>(entry_per_rank * (world_rank + 1), wm_desc->sizes[0]);
if (local_size % gran_size != 0) return WHOLEMEMORY_LOGIC_ERROR;
wholememory_tensor_description_t local_desc = *wm_desc;
// local_desc.sizes[0] = local_size / gran_size;
local_desc.sizes[0] = (local_end - local_start);
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_make_tensor_from_pointer(local_tensor, local_ptr, &local_desc));
return WHOLEMEMORY_SUCCESS;
}
void* wholememory_tensor_get_data_pointer(wholememory_tensor_t wholememory_tensor)
{
char* data_ptr = nullptr;
if (wholememory_tensor->is_wholememory &&
wholememory_get_memory_type(wholememory_tensor->wholememory_handle) !=
WHOLEMEMORY_MT_CONTINUOUS) {
return nullptr;
}
if (!wholememory_tensor->is_wholememory) {
data_ptr = static_cast<char*>(wholememory_tensor->storage_ptr);
} else {
if (wholememory_get_global_pointer(reinterpret_cast<void**>(&data_ptr),
wholememory_tensor->wholememory_handle) !=
WHOLEMEMORY_SUCCESS) {
return nullptr;
}
}
return data_ptr +
wholememory_dtype_get_element_size(wholememory_tensor->tensor_description.dtype) *
wholememory_tensor->tensor_description.storage_offset;
}
size_t wholememory_tensor_get_entry_per_partition(wholememory_tensor_t wholememory_tensor)
{
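  // For WholeMemory-backed tensors, derive the per-rank entry count from the handle's
  // partition plan and cross-check it against wholememory_determine_entry_partition_plan;
  // for plain pointer-backed tensors the whole first dimension counts as one partition.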
wholememory_tensor_t root_tensor = wholememory_tensor_get_root(wholememory_tensor);
WHOLEMEMORY_CHECK_NOTHROW(
(root_tensor->tensor_description.dim == 1 || root_tensor->tensor_description.dim == 2));
if (wholememory_tensor->is_wholememory) {
size_t size_per_rank;
wholememory_get_partition_plan(&size_per_rank,
wholememory_tensor_get_memory_handle(root_tensor));
size_t embedding_stride = 1;
size_t const element_size =
wholememory_dtype_get_element_size(wholememory_tensor->tensor_description.dtype);
if (root_tensor->tensor_description.dim == 2) {
embedding_stride = root_tensor->tensor_description.strides[0];
}
WHOLEMEMORY_CHECK_NOTHROW(size_per_rank % (embedding_stride * element_size) == 0);
size_t det_entry_per_rank;
int world_size;
wholememory_comm_t comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(
&comm, wholememory_tensor_get_memory_handle(wholememory_tensor)));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, comm));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_determine_entry_partition_plan(
&det_entry_per_rank, root_tensor->tensor_description.sizes[0], world_size));
WHOLEMEMORY_CHECK_NOTHROW(det_entry_per_rank ==
size_per_rank / (embedding_stride * element_size));
return det_entry_per_rank;
}
return root_tensor->tensor_description.sizes[0];
}
wholememory_error_code_t wholememory_tensor_get_subtensor(
wholememory_tensor_t wholememory_tensor,
int64_t* starts,
int64_t* ends,
wholememory_tensor_t* p_sub_wholememory_tensor)
{
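  // starts/ends select a contiguous slice per dimension; -1 means "from the beginning" /
  // "to the end". The subtensor is a non-owning view that shares the parent's storage and
  // only adjusts storage_offset and sizes.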
if (p_sub_wholememory_tensor == nullptr || wholememory_tensor == nullptr || starts == nullptr ||
ends == nullptr) {
return WHOLEMEMORY_INVALID_INPUT;
}
if (wholememory_tensor->tensor_description.dim > 2) { return WHOLEMEMORY_NOT_IMPLEMENTED; }
int const dim = wholememory_tensor->tensor_description.dim;
int64_t new_size[2] = {0, 0};
int64_t new_offset = wholememory_tensor->tensor_description.storage_offset;
for (int i = 0; i < dim; i++) {
int64_t starts_i = starts[i];
int64_t ends_i = ends[i];
if (starts[i] == -1) starts_i = 0;
if (ends[i] == -1) ends_i = wholememory_tensor->tensor_description.sizes[i];
if (ends_i <= starts_i) return WHOLEMEMORY_INVALID_INPUT;
if (starts_i >= wholememory_tensor->tensor_description.sizes[i])
return WHOLEMEMORY_INVALID_INPUT;
if (ends_i <= 0) return WHOLEMEMORY_INVALID_INPUT;
new_offset += wholememory_tensor->tensor_description.strides[i] * starts_i;
new_size[i] = ends_i - starts_i;
}
auto* sub_wholememory_tensor =
static_cast<wholememory_tensor_*>(malloc(sizeof(wholememory_tensor_)));
*sub_wholememory_tensor = *wholememory_tensor;
sub_wholememory_tensor->own_handle = false;
sub_wholememory_tensor->tensor_description.storage_offset = new_offset;
sub_wholememory_tensor->tensor_description.dim = dim;
for (int i = 0; i < dim; i++) {
sub_wholememory_tensor->tensor_description.sizes[i] = new_size[i];
sub_wholememory_tensor->tensor_description.strides[i] =
wholememory_tensor->tensor_description.strides[i];
}
*p_sub_wholememory_tensor = sub_wholememory_tensor;
inc_tensor_count();
return WHOLEMEMORY_SUCCESS;
}
wholememory_tensor_t wholememory_tensor_get_root(wholememory_tensor_t wholememory_tensor)
{
return wholememory_tensor->root_tensor;
}
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding_cache.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/embedding.h>
#include <wholememory/wholememory_tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
struct wholememory_embedding_cache_policy_ {
wholememory_comm_t cache_comm = nullptr; // nullptr means only use local GPU
wholememory_memory_type_t cache_memory_type;
wholememory_memory_location_t cache_memory_location;
wholememory_access_type_t access_type;
float cache_ratio = 0.2F;
};
#ifdef __cplusplus
}
#endif
namespace wholememory {
class embedding_cache_local_data {
public:
embedding_cache_local_data() = default;
~embedding_cache_local_data();
wholememory_tensor_t cache_line_tag_ = nullptr;
wholememory_tensor_t cache_line_lfu_count_ = nullptr;
wholememory_tensor_t cache_line_data_ = nullptr;
wholememory_tensor_t access_count_ = nullptr;
};
class embedding_cache_base {
public:
explicit embedding_cache_base(wholememory_embedding_cache_policy_t cache_policy);
embedding_cache_base() = delete;
embedding_cache_base(const embedding_cache_base&) = delete;
virtual ~embedding_cache_base();
embedding_cache_local_data* get_cache_local_data() { return &local_cache_; }
[[nodiscard]] int get_cache_set_coverage() const { return cache_set_coverage_; }
virtual wholememory_error_code_t get_embedding_requirement(
wholememory_tensor_description_t* padded_desc,
wholememory_matrix_description_t data_desc,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept = 0;
wholememory_error_code_t allocate(wholememory_tensor_t raw_data_tensor) noexcept;
virtual wholememory_error_code_t writeback_all_cache(cudaStream_t stream) noexcept;
virtual wholememory_error_code_t drop_all_cache(cudaStream_t stream) noexcept;
// wholememory_error_code_t refill_all_cache(cudaStream_t stream) noexcept;
static constexpr int64_t kEmbeddingAlignmentInBytes = 16;
static constexpr int kCacheSetSize = 32;
// Tag format:
  // 1 bit valid, 1 bit modified, 14-bit index.
static constexpr int kCacheSetCoverageBits = 14; // 2 bits left for modified and valid state
static constexpr uint16_t kvalidCacheTagValue = 1U << (kCacheSetCoverageBits + 1);
static constexpr int kMaxCacheSetCoverage = 1 << kCacheSetCoverageBits;
// Counter format:
  // 14-bit scaled counter, plus 2 bits per thread (64 bits per set) of per-set scaling info.
static constexpr int kScaledCounterBits =
14; // 2 bits (64 bits in set) left for scale and reserved
// cache related tensor
wholememory_tensor_t cache_line_tag_wm_tensor_ = nullptr;
wholememory_tensor_t cache_line_lfu_count_wm_tensor_ = nullptr;
wholememory_tensor_t cache_line_data_wm_tensor_ = nullptr;
wholememory_tensor_t access_count_wm_tensor_ = nullptr;
protected:
void pad_last_dim(wholememory_matrix_description_t data_desc) noexcept;
wholememory_error_code_t compute_cache_set_coverage() noexcept;
wholememory_error_code_t check_raw_tensor(wholememory_tensor_t raw_data_tensor) noexcept;
wholememory_matrix_description_t padded_matrix_description_;
wholememory_matrix_description_t matrix_description_;
wholememory_tensor_t padded_raw_tensor_ = nullptr; // just a reference, not owned
wholememory_comm_t raw_comm_ = nullptr;
wholememory_memory_type_t raw_memory_type_ = WHOLEMEMORY_MT_NONE;
wholememory_memory_location_t raw_memory_location_ = WHOLEMEMORY_ML_NONE;
wholememory_embedding_cache_policy_t cache_policy_ = nullptr;
int cache_set_coverage_ = kCacheSetSize;
int64_t padded_embedding_count_for_cache_ = 0;
embedding_cache_local_data local_cache_;
};
class device_cache_for_host : public embedding_cache_base {
public:
device_cache_for_host(wholememory_embedding_cache_policy_t cache_policy);
device_cache_for_host() = delete;
device_cache_for_host(const device_cache_for_host&) = delete;
~device_cache_for_host();
wholememory_error_code_t get_embedding_requirement(
wholememory_tensor_description_t* padded_desc,
wholememory_matrix_description_t data_desc,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept override;
wholememory_error_code_t writeback_all_cache(cudaStream_t stream) noexcept override;
wholememory_error_code_t drop_all_cache(cudaStream_t stream) noexcept override;
};
class local_cache_for_global : public embedding_cache_base {
public:
local_cache_for_global(wholememory_embedding_cache_policy_t cache_policy);
local_cache_for_global() = delete;
local_cache_for_global(const local_cache_for_global&) = delete;
~local_cache_for_global();
wholememory_error_code_t get_embedding_requirement(
wholememory_tensor_description_t* padded_desc,
wholememory_matrix_description_t data_desc,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept override;
wholememory_error_code_t drop_all_cache(cudaStream_t stream) noexcept override;
};
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/system_info.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
bool DevAttrPagebleMemoryAccess();
bool DeviceCanAccessPeer(int peer_device);
bool DevicesCanAccessP2P(const int* dev_ids, int count);
int GetCudaCompCap();
const char* GetCPUArch();
bool SupportMNNVL();
bool SupportEGM();
bool SupportMNNVLForEGM();
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/nccl_comms.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <cuda_runtime_api.h>
#include <nccl.h>
#include <vector>
#include <wholememory/wholememory.h>
namespace wholememory {
class nccl_comms {
public:
nccl_comms() = delete;
/**
* @brief Constructor for collective + point-to-point operation.
* @param nccl_comm initialized nccl comm
* @param num_ranks number of ranks in the cluster
* @param rank rank of the current worker
* @param stream cuda stream for synchronizing and ordering collective operations
*/
nccl_comms(ncclComm_t nccl_comm, int num_ranks, int rank, cudaStream_t stream);
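  // Hedged usage sketch (illustrative only; assumes the caller has already created a NCCL
  // communicator, e.g. via ncclCommInitRank, and a CUDA stream):
  //   wholememory::nccl_comms comms(nccl_comm, num_ranks, rank, stream);
  //   comms.initialize();
  //   comms.barrier();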
void initialize();
~nccl_comms();
int get_size() const { return num_ranks_; }
int get_rank() const { return rank_; }
void barrier() const;
void allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const;
void host_allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op) const;
void bcast(
void* buff, size_t count, ncclDataType_t datatype, int root, cudaStream_t stream) const;
void bcast(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const;
void host_bcast(
const void* sendbuff, void* recvbuff, size_t count, ncclDataType_t datatype, int root) const;
void host_bcast(void* buff, size_t count, ncclDataType_t datatype, int root) const;
void reduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
int root,
cudaStream_t stream) const;
void host_reduce(const void* sendbuff,
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op,
int root) const;
void allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
cudaStream_t stream) const;
void host_allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype) const;
void allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype,
cudaStream_t stream) const;
void host_allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype) const;
void gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const;
void host_gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
int root) const;
void gatherv(const void* sendbuff,
void* recvbuff,
size_t sendcount,
const size_t* recvcounts,
const size_t* displs,
ncclDataType_t datatype,
int root,
cudaStream_t stream) const;
void reducescatter(const void* sendbuff,
void* recvbuff,
size_t recvcount,
ncclDataType_t datatype,
ncclRedOp_t op,
cudaStream_t stream) const;
void alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype,
cudaStream_t stream) const;
void host_alltoall(const void* sendbuff,
void* recvbuff,
size_t sendcount,
ncclDataType_t datatype) const;
void alltoallv(const void* sendbuff,
void* recvbuff,
const size_t* sendcounts,
const size_t* senddispls,
const size_t* recvcounts,
const size_t* recvdispls,
ncclDataType_t datatype,
cudaStream_t stream) const;
wholememory_error_code_t sync_stream(cudaStream_t stream) const;
wholememory_error_code_t sync_stream() const;
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_send(const void* send_buf, size_t send_size, int dest, cudaStream_t stream) const;
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_recv(void* recv_buf, size_t recv_size, int source, cudaStream_t stream) const;
void device_sendrecv(const void* sendbuf,
size_t sendsize,
int dest,
void* recvbuf,
size_t recvsize,
int source,
cudaStream_t stream) const;
void device_multicast_sendrecv(const void* sendbuf,
std::vector<size_t> const& sendsizes,
std::vector<size_t> const& sendoffsets,
std::vector<int> const& dests,
void* recvbuf,
std::vector<size_t> const& recvsizes,
std::vector<size_t> const& recvoffsets,
std::vector<int> const& sources,
cudaStream_t stream) const;
void group_start() const;
void group_end() const;
private:
ncclComm_t nccl_comm_;
cudaStream_t rmm_stream_;
int num_ranks_;
int rank_;
char* host_send_buffer_;
char* host_recv_buffer_;
static constexpr size_t HOST_BUFFER_SIZE_PER_RANK = 1LL * 1024 * 1024;
int32_t* buf_;
};
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/env_func_ptrs.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
namespace wholememory {
struct default_memory_context_t {
wholememory_tensor_description_t desc;
wholememory_memory_allocation_type_t allocation_type;
void* ptr;
};
/**
* @brief : Default environment functions for memory allocation.
* Will use cudaMalloc/cudaFree, cudaMallocHost/cudaFreeHost, malloc/free.
* Useful for function tests, NOT designed for performance tests.
*
* @return : pointers to the functions of current CUDA device
*/
wholememory_env_func_t* get_default_env_func();
/**
* @brief : Environment functions for memory allocation with caches.
* Will cache allocated memory blocks, and reuse if possible.
 * The minimal block size is 256 bytes; blocks smaller than 1 GB are rounded up to a power of 2,
 * and blocks of 1 GB or larger are aligned to 1 GB.
 * Useful for performance tests. Needs a warm-up pass to fill the caches.
*
* @return : pointers to the functions of current CUDA device
*/
wholememory_env_func_t* get_cached_env_func();
/**
* @brief : drop all caches of inside cached allocator of current CUDA device
*/
void drop_cached_env_func_cache();
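// Hedged usage sketch (illustrative only): the cached environment functions can be passed
// wherever a wholememory_env_func_t* is expected by ops that allocate temporary or output
// buffers, for example:
//   wholememory_env_func_t* env_fns = wholememory::get_cached_env_func();
//   /* ... warm-up pass, then timed iterations passing env_fns to the ops ... */
//   wholememory::drop_cached_env_func_cache();  // release the pooled blocks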
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding_cache.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "embedding_cache.hpp"
#include <cmath>
#include "integer_utils.hpp"
#include "logger.hpp"
#include "memory_handle.hpp"
#include "wholememory_ops/functions/embedding_cache_func.h"
namespace wholememory {
embedding_cache_local_data::~embedding_cache_local_data()
{
if (cache_line_tag_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_tag_) == WHOLEMEMORY_SUCCESS);
cache_line_tag_ = nullptr;
}
if (cache_line_lfu_count_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_lfu_count_) ==
WHOLEMEMORY_SUCCESS);
cache_line_lfu_count_ = nullptr;
}
if (cache_line_data_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_data_) == WHOLEMEMORY_SUCCESS);
cache_line_data_ = nullptr;
}
if (access_count_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(access_count_) == WHOLEMEMORY_SUCCESS);
access_count_ = nullptr;
}
}
embedding_cache_base::embedding_cache_base(wholememory_embedding_cache_policy_t cache_policy)
{
cache_policy_ = cache_policy;
}
embedding_cache_base::~embedding_cache_base()
{
if (cache_line_tag_wm_tensor_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_tag_wm_tensor_) ==
WHOLEMEMORY_SUCCESS);
cache_line_tag_wm_tensor_ = nullptr;
}
if (cache_line_lfu_count_wm_tensor_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_lfu_count_wm_tensor_) ==
WHOLEMEMORY_SUCCESS);
cache_line_lfu_count_wm_tensor_ = nullptr;
}
if (cache_line_data_wm_tensor_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(cache_line_data_wm_tensor_) ==
WHOLEMEMORY_SUCCESS);
cache_line_data_wm_tensor_ = nullptr;
}
if (access_count_wm_tensor_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_tensor(access_count_wm_tensor_) ==
WHOLEMEMORY_SUCCESS);
access_count_wm_tensor_ = nullptr;
}
if (cache_policy_ != nullptr) {
WHOLEMEMORY_CHECK_NOTHROW(wholememory_destroy_embedding_cache_policy(cache_policy_));
cache_policy_ = nullptr;
}
}
void embedding_cache_base::pad_last_dim(wholememory_matrix_description_t data_desc) noexcept
{
matrix_description_ = data_desc;
int64_t const embedding_count = matrix_description_.sizes[0];
int64_t const embedding_dim = matrix_description_.sizes[1];
size_t const element_size = wholememory_dtype_get_element_size(matrix_description_.dtype);
WHOLEMEMORY_CHECK_NOTHROW(element_size != -1);
int64_t const align_count = kEmbeddingAlignmentInBytes / element_size;
int64_t const embedding_stride = round_up_unsafe<int64_t>(embedding_dim, align_count);
matrix_description_.stride = embedding_stride;
padded_matrix_description_ = matrix_description_;
}
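// Worked example (illustrative): for a 4-byte (float) element type with embedding_dim = 127,
// align_count is 16 / 4 = 4 elements, so the stride is rounded up to 128; an embedding_dim
// that is already a multiple of 4 (e.g. 128) keeps its stride unchanged.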
wholememory_error_code_t embedding_cache_base::check_raw_tensor(
wholememory_tensor_t raw_data_tensor) noexcept
{
// Check all are same as requested.
if (raw_data_tensor == nullptr) {
WHOLEMEMORY_ERROR("raw_data_tensor is null");
return WHOLEMEMORY_INVALID_INPUT;
}
if (!wholememory_tensor_has_handle(raw_data_tensor)) {
WHOLEMEMORY_ERROR("raw_data_tensor is not WholeMemory Tensor");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* mem_handle = wholememory_tensor_get_memory_handle(raw_data_tensor);
if (mem_handle == nullptr) {
WHOLEMEMORY_ERROR("raw_data_tensor WholeMemory Handle is nullptr");
return WHOLEMEMORY_INVALID_INPUT;
}
if (wholememory_get_memory_type(mem_handle) != raw_memory_type_ ||
get_memory_location(mem_handle) != raw_memory_location_) {
WHOLEMEMORY_ERROR(
"raw_data_tensor WholeMemory type or location is not same as get_embedding_requirement");
return WHOLEMEMORY_INVALID_INPUT;
}
wholememory_comm_t comm = nullptr;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(&comm, mem_handle));
if (comm != raw_comm_) {
WHOLEMEMORY_ERROR(
"raw_data_tensor WholeMemory communicator is not same as get_embedding_requirement");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* raw_desc = wholememory_tensor_get_tensor_description(raw_data_tensor);
try {
WHOLEMEMORY_CHECK(raw_desc->dim == 2 && raw_desc->storage_offset == 0);
WHOLEMEMORY_CHECK(raw_desc->dtype == matrix_description_.dtype);
WHOLEMEMORY_CHECK(raw_desc->strides[0] == matrix_description_.stride &&
raw_desc->strides[1] == 1);
WHOLEMEMORY_CHECK(raw_desc->sizes[0] == matrix_description_.sizes[0] &&
raw_desc->sizes[1] == matrix_description_.sizes[1]);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("check_raw_tensor failed.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (!wholememory_tensor_has_handle(raw_data_tensor)) {
WHOLEMEMORY_ERROR("should be WholeMemory Tensor.");
return WHOLEMEMORY_INVALID_INPUT;
}
auto* root_tensor = wholememory_tensor_get_root(raw_data_tensor);
auto* root_desc = wholememory_tensor_get_tensor_description(root_tensor);
try {
WHOLEMEMORY_CHECK(root_desc->dim == 2 && root_desc->storage_offset == 0);
WHOLEMEMORY_CHECK(root_desc->dtype == padded_matrix_description_.dtype);
WHOLEMEMORY_CHECK(root_desc->strides[0] == padded_matrix_description_.stride &&
root_desc->strides[1] == 1);
WHOLEMEMORY_CHECK(root_desc->sizes[0] == padded_matrix_description_.sizes[0] &&
root_desc->sizes[1] == padded_matrix_description_.sizes[1]);
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("check_raw_tensor failed for root tensor.");
return WHOLEMEMORY_LOGIC_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_cache_base::compute_cache_set_coverage() noexcept
{
if (cache_policy_ == nullptr) {
WHOLEMEMORY_ERROR("cache_policy_ not set.");
return WHOLEMEMORY_LOGIC_ERROR;
}
float const cache_ratio = cache_policy_->cache_ratio;
if (cache_ratio >= 1.0F || cache_ratio <= 0.0F) {
WHOLEMEMORY_ERROR("Invalid cache ratio %f, should be in range (0.0, 1.0).", cache_ratio);
return WHOLEMEMORY_INVALID_VALUE;
}
cache_set_coverage_ = std::round(kCacheSetSize / cache_ratio);
cache_set_coverage_ = std::min(cache_set_coverage_, kMaxCacheSetCoverage);
return WHOLEMEMORY_SUCCESS;
}
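// Worked example (illustrative): with the default cache_ratio of 0.2 and kCacheSetSize = 32,
// cache_set_coverage_ = round(32 / 0.2) = 160, i.e. each 32-way cache set covers 160
// consecutive padded embedding rows; the result is clamped to kMaxCacheSetCoverage (16384)
// for very small ratios.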
wholememory_error_code_t embedding_cache_base::allocate(
wholememory_tensor_t raw_data_tensor) noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(check_raw_tensor(raw_data_tensor));
padded_raw_tensor_ = wholememory_tensor_get_root(raw_data_tensor);
auto* padded_raw_desc = wholememory_tensor_get_tensor_description(padded_raw_tensor_);
WHOLEMEMORY_CHECK_NOTHROW(padded_raw_desc != nullptr);
WHOLEMEMORY_CHECK_NOTHROW(padded_raw_desc->dim == 2);
int64_t const padded_embedding_count = padded_embedding_count_for_cache_;
WHOLEMEMORY_CHECK_NOTHROW(padded_embedding_count % cache_set_coverage_ == 0);
int64_t const total_cache_set_count = padded_embedding_count / cache_set_coverage_;
int cache_world_size = 1;
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_communicator_get_size(&cache_world_size, cache_policy_->cache_comm));
WHOLEMEMORY_CHECK_NOTHROW(total_cache_set_count % cache_world_size == 0);
wholememory_tensor_description_t cache_line_meta_desc;
cache_line_meta_desc.dim = 2;
cache_line_meta_desc.dtype = WHOLEMEMORY_DT_INT16;
cache_line_meta_desc.storage_offset = 0;
cache_line_meta_desc.sizes[0] = total_cache_set_count;
cache_line_meta_desc.sizes[1] = cache_line_meta_desc.strides[0] = kCacheSetSize;
cache_line_meta_desc.strides[1] = 1;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&cache_line_tag_wm_tensor_,
&cache_line_meta_desc,
cache_policy_->cache_comm,
cache_policy_->cache_memory_type,
cache_policy_->cache_memory_location));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&cache_line_lfu_count_wm_tensor_,
&cache_line_meta_desc,
cache_policy_->cache_comm,
cache_policy_->cache_memory_type,
cache_policy_->cache_memory_location));
wholememory_tensor_description_t cache_line_data_desc = cache_line_meta_desc;
cache_line_data_desc.dtype = padded_raw_desc->dtype;
cache_line_data_desc.sizes[0] = total_cache_set_count * kCacheSetSize;
cache_line_data_desc.sizes[1] = padded_raw_desc->sizes[1];
cache_line_data_desc.strides[0] = padded_raw_desc->strides[0];
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&cache_line_data_wm_tensor_,
&cache_line_data_desc,
cache_policy_->cache_comm,
cache_policy_->cache_memory_type,
cache_policy_->cache_memory_location));
wholememory_tensor_description_t access_count_desc;
access_count_desc.dim = 1;
access_count_desc.storage_offset = 0;
access_count_desc.sizes[0] = padded_embedding_count_for_cache_;
access_count_desc.dtype = WHOLEMEMORY_DT_INT64;
access_count_desc.strides[0] = 1;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(&access_count_wm_tensor_,
&access_count_desc,
cache_policy_->cache_comm,
cache_policy_->cache_memory_type,
cache_policy_->cache_memory_location));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(cache_line_tag_wm_tensor_, &local_cache_.cache_line_tag_));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_map_local_tensor(
cache_line_lfu_count_wm_tensor_, &local_cache_.cache_line_lfu_count_));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_map_local_tensor(cache_line_data_wm_tensor_,
&local_cache_.cache_line_data_));
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_tensor_map_local_tensor(access_count_wm_tensor_, &local_cache_.access_count_));
size_t const local_cache_line_count = wholememory_get_memory_element_count_from_tensor(
wholememory_tensor_get_tensor_description(local_cache_.cache_line_tag_));
WM_CUDA_CHECK_NO_THROW(
cudaMemset(wholememory_tensor_get_data_pointer(local_cache_.cache_line_tag_),
0,
local_cache_line_count * sizeof(int16_t)));
WM_CUDA_CHECK_NO_THROW(
cudaMemset(wholememory_tensor_get_data_pointer(local_cache_.cache_line_lfu_count_),
0,
local_cache_line_count * sizeof(int16_t)));
size_t const local_access_count_count = wholememory_get_memory_element_count_from_tensor(
wholememory_tensor_get_tensor_description(local_cache_.access_count_));
WM_CUDA_CHECK_NO_THROW(cudaMemset(wholememory_tensor_get_data_pointer(local_cache_.access_count_),
0,
local_access_count_count * sizeof(int64_t)));
WM_CUDA_CHECK_NO_THROW(cudaDeviceSynchronize());
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(cache_policy_->cache_comm));
return WHOLEMEMORY_SUCCESS;
}
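// Recap of what allocate() creates above (descriptive only, derived from the code): two
// [total_cache_set_count, kCacheSetSize] int16 tensors for cache-line tags and LFU counters,
// one [total_cache_set_count * kCacheSetSize, embedding_dim] tensor with the padded stride for
// the cached rows, and one int64 access counter of length padded_embedding_count_for_cache_;
// the local views of the tag, LFU-count and access-count tensors are zero-initialized before
// the final barrier.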
wholememory_error_code_t embedding_cache_base::writeback_all_cache(cudaStream_t stream) noexcept
{
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t embedding_cache_base::drop_all_cache(cudaStream_t stream) noexcept
{
return WHOLEMEMORY_SUCCESS;
}
device_cache_for_host::device_cache_for_host(wholememory_embedding_cache_policy_t cache_policy)
: embedding_cache_base(cache_policy)
{
}
device_cache_for_host::~device_cache_for_host() {}
wholememory_error_code_t device_cache_for_host::get_embedding_requirement(
wholememory_tensor_description_t* padded_desc,
wholememory_matrix_description_t data_desc,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept
{
if (cache_policy_ == nullptr) {
WHOLEMEMORY_ERROR("No cache policy set.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (cache_policy_->cache_memory_location != WHOLEMEMORY_ML_DEVICE) {
WHOLEMEMORY_ERROR("device_cache_for_host cache memory should be device.");
return WHOLEMEMORY_INVALID_INPUT;
}
if (comm != cache_policy_->cache_comm) {
WHOLEMEMORY_ERROR("device_cache_for_host cache should use the same communicator as raw data.");
return WHOLEMEMORY_INVALID_VALUE;
}
if (padded_raw_tensor_ != nullptr) {
WHOLEMEMORY_ERROR("embedding_cache already cached other embedding.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (memory_type > cache_policy_->cache_memory_type) {
WHOLEMEMORY_ERROR("embedding memory_type should support at least cache memory_type.");
return WHOLEMEMORY_INVALID_VALUE;
}
compute_cache_set_coverage();
pad_last_dim(data_desc);
int64_t const embedding_count = matrix_description_.sizes[0];
int world_size = 1;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_get_size(&world_size, comm));
padded_embedding_count_for_cache_ = round_up_unsafe<int64_t>(
embedding_count, static_cast<int64_t>(world_size) * cache_set_coverage_);
padded_matrix_description_.sizes[0] = padded_embedding_count_for_cache_;
wholememory_copy_matrix_desc_to_tensor(padded_desc, &padded_matrix_description_);
raw_comm_ = comm;
raw_memory_location_ = memory_location;
raw_memory_type_ = memory_type;
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t device_cache_for_host::writeback_all_cache(cudaStream_t stream) noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::writeback_cache_direct_same_comm(
padded_raw_tensor_, &local_cache_, cache_set_coverage_, false, stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(
&wm_comm, wholememory_tensor_get_memory_handle(padded_raw_tensor_)));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(wm_comm));
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t device_cache_for_host::drop_all_cache(cudaStream_t stream) noexcept
{
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_ops::writeback_cache_direct_same_comm(
padded_raw_tensor_, &local_cache_, cache_set_coverage_, true, stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
wholememory_comm_t wm_comm;
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_get_communicator(
&wm_comm, wholememory_tensor_get_memory_handle(padded_raw_tensor_)));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(wm_comm));
return WHOLEMEMORY_SUCCESS;
}
local_cache_for_global::local_cache_for_global(wholememory_embedding_cache_policy_t cache_policy)
: embedding_cache_base(cache_policy)
{
}
local_cache_for_global::~local_cache_for_global() {}
wholememory_error_code_t local_cache_for_global::get_embedding_requirement(
wholememory_tensor_description_t* padded_desc,
wholememory_matrix_description_t data_desc,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location) noexcept
{
if (cache_policy_ == nullptr) {
WHOLEMEMORY_ERROR("No cache policy set.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (cache_policy_->cache_memory_type > WHOLEMEMORY_MT_CHUNKED) {
WHOLEMEMORY_ERROR(
"local_cache_for_global cache should support at least WHOLEMEMORY_MT_CHUNKED for now.");
return WHOLEMEMORY_NOT_IMPLEMENTED;
}
if (padded_raw_tensor_ != nullptr) {
WHOLEMEMORY_ERROR("embedding_cache already cached other embedding.");
return WHOLEMEMORY_LOGIC_ERROR;
}
if (cache_policy_->access_type != WHOLEMEMORY_AT_READONLY) {
    WHOLEMEMORY_ERROR("local_cache_for_global supports only READONLY cache.");
return WHOLEMEMORY_NOT_IMPLEMENTED;
}
compute_cache_set_coverage();
pad_last_dim(data_desc);
int64_t const embedding_count = matrix_description_.sizes[0];
int cache_world_size = 1;
if (cache_policy_->cache_comm != nullptr) {
WHOLEMEMORY_RETURN_ON_FAIL(
wholememory_communicator_get_size(&cache_world_size, cache_policy_->cache_comm));
}
padded_embedding_count_for_cache_ = round_up_unsafe<int64_t>(
embedding_count, static_cast<int64_t>(cache_world_size) * cache_set_coverage_);
padded_matrix_description_.sizes[0] = padded_embedding_count_for_cache_;
wholememory_copy_matrix_desc_to_tensor(padded_desc, &padded_matrix_description_);
raw_comm_ = comm;
raw_memory_location_ = memory_location;
raw_memory_type_ = memory_type;
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t local_cache_for_global::drop_all_cache(cudaStream_t stream) noexcept
{
wholememory_tensor_t local_tag_tensor = local_cache_.cache_line_tag_;
size_t local_cache_line_size = wholememory_get_memory_size_from_tensor(
wholememory_tensor_get_tensor_description(local_tag_tensor));
WM_CUDA_CHECK_NO_THROW(cudaMemsetAsync(
wholememory_tensor_get_data_pointer(local_tag_tensor), 0, local_cache_line_size, stream));
WM_CUDA_CHECK_NO_THROW(cudaStreamSynchronize(stream));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(cache_policy_->cache_comm));
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/file_io.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "file_io.h"
#include <sys/stat.h>
#include <unistd.h>
#include <string>
#include <vector>
#include "communicator.hpp"
#include "error.hpp"
#include "logger.hpp"
namespace wholememory {
static bool IsFileExist(const char* filename, int mode) { return access(filename, mode) == 0; }
static size_t StatFileSize(const char* filename)
{
auto filesize = static_cast<size_t>(-1);
struct stat statbuf {};
if (stat(filename, &statbuf) < 0) { return filesize; }
filesize = statbuf.st_size;
return filesize;
}
static size_t get_handle_partial_size(size_t handle_size,
size_t memory_offset,
size_t memory_entry_stride,
size_t entry_size)
{
handle_size -= memory_offset;
size_t tail = handle_size % memory_entry_stride;
if (tail != 0 && tail < entry_size) {
WHOLEMEMORY_FAIL_NOTHROW(
"handle_size=%ld, memory_offset=%ld, memory_entry_stride=%ld, entry_size=%ld, tail=%ld is "
"not 0"
" or >= entry_size.",
handle_size,
memory_offset,
memory_entry_stride,
entry_size,
tail);
}
size_t partial_size = 0;
if (tail != 0) partial_size = entry_size;
partial_size += (handle_size / memory_entry_stride) * entry_size;
return partial_size;
}
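// Worked example (illustrative): handle_size = 1000, memory_offset = 0,
// memory_entry_stride = 100 and entry_size = 80 give tail = 0 and a partial size of
// (1000 / 100) * 80 = 800 bytes. With handle_size = 1080 the tail is 80 (>= entry_size),
// so one extra entry is counted: 80 + 10 * 80 = 880 bytes.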
wholememory_error_code_t load_file_to_handle(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t entry_size,
const char** file_names,
int file_count) noexcept
{
if (entry_size <= 0 || memory_offset < 0 || memory_offset + entry_size > memory_entry_stride) {
WHOLEMEMORY_ERROR("Invalid input, entry_size=%ld, memory_entry_stride=%ld, memory_offset=%ld",
entry_size,
memory_entry_stride,
memory_offset);
return WHOLEMEMORY_INVALID_INPUT;
}
size_t wm_data_granularity = wholememory_get_data_granularity(wholememory_handle);
if (wm_data_granularity % memory_entry_stride != 0) {
WHOLEMEMORY_ERROR("Invalid input, memory_entry_stride=%ld, but wm_data_granularity=%ld",
memory_entry_stride,
wm_data_granularity);
return WHOLEMEMORY_INVALID_INPUT;
}
size_t wm_total_size = wholememory_get_total_size(wholememory_handle);
size_t expected_file_size =
get_handle_partial_size(wm_total_size, memory_offset, memory_entry_stride, entry_size);
if (file_count < 0 || file_count >= 65536) {
    WHOLEMEMORY_ERROR("Invalid input file count=%d, should be in range [0, 65536).", file_count);
return WHOLEMEMORY_INVALID_INPUT;
}
std::vector<size_t> file_sizes(file_count, 0);
size_t file_total_size = 0;
for (int i = 0; i < file_count; i++) {
if (file_names[i] == nullptr) {
WHOLEMEMORY_ERROR("input file %d of %d is nullptr.", i, file_count);
return WHOLEMEMORY_INVALID_INPUT;
}
if (!IsFileExist(file_names[i], R_OK)) {
WHOLEMEMORY_ERROR(
"input_file[%d] of %d (%s) cannot open for read.", i, file_count, file_names[i]);
return WHOLEMEMORY_INVALID_INPUT;
}
file_sizes[i] = StatFileSize(file_names[i]);
if (file_sizes[i] == static_cast<size_t>(-1)) {
WHOLEMEMORY_ERROR(
"input_file[%d] of %d (%s) stat size failed.", i, file_count, file_names[i]);
return WHOLEMEMORY_INVALID_INPUT;
}
if (file_sizes[i] % entry_size != 0) {
WHOLEMEMORY_ERROR("input_file[%d] of %d (%s) size=%ld, but entry_size=%ld failed.",
i,
file_count,
file_names[i],
file_sizes[i],
entry_size);
return WHOLEMEMORY_INVALID_INPUT;
}
file_total_size += file_sizes[i];
}
if (file_total_size > expected_file_size) {
WHOLEMEMORY_ERROR("all %d input file size is %ld, but expected %ld",
file_count,
file_total_size,
expected_file_size);
return WHOLEMEMORY_INVALID_VALUE;
}
try {
wholememory_comm_t wm_comm;
WHOLEMEMORY_CHECK(wholememory_get_communicator(&wm_comm, wholememory_handle) ==
WHOLEMEMORY_SUCCESS);
int wm_rank;
WHOLEMEMORY_CHECK(wholememory_communicator_get_rank(&wm_rank, wm_comm) == WHOLEMEMORY_SUCCESS);
WM_COMM_CHECK_ALL_SAME(wm_comm, file_count);
for (int i = 0; i < file_count; i++) {
WM_COMM_CHECK_ALL_SAME(wm_comm, file_sizes[i]);
}
char* local_ptr = nullptr;
size_t local_size, local_offset;
WHOLEMEMORY_CHECK(wholememory_get_local_memory(
(void**)(&local_ptr), &local_size, &local_offset, wholememory_handle) ==
WHOLEMEMORY_SUCCESS);
constexpr int kSuggestedBufferSize = 16 * 1024 * 1024;
size_t buffer_size;
size_t buffer_entry_count = 1;
if (kSuggestedBufferSize < entry_size) {
buffer_size = entry_size;
} else {
buffer_entry_count = kSuggestedBufferSize / entry_size;
buffer_size = buffer_entry_count * entry_size;
}
std::vector<char> file_read_buffer(buffer_size);
size_t local_entry_memory_start_index = local_offset / memory_entry_stride;
size_t local_entry_file_start_index =
local_entry_memory_start_index - memory_offset / memory_entry_stride;
size_t local_entry_count = local_size / memory_entry_stride;
char* local_write_ptr = local_ptr + memory_offset % memory_entry_stride;
if (wm_rank == 0) {
local_entry_count -= memory_offset / memory_entry_stride;
local_write_ptr += (memory_offset / memory_entry_stride) * memory_entry_stride;
}
size_t local_entry_idx = 0;
size_t file_entry_offset = 0;
size_t total_read_bytes = 0;
for (int i = 0; i < file_count; i++) {
size_t file_entry_count = file_sizes[i] / entry_size;
// already outside reading window
if (file_entry_offset >= local_entry_file_start_index + local_entry_count) break;
// in reading window
if (file_entry_offset + file_entry_count > local_entry_file_start_index) {
size_t file_read_start_offset = 0;
FILE* fp = fopen(file_names[i], "rb");
if (fp == nullptr) { WHOLEMEMORY_ERROR("Open file %s for read failed.", file_names[i]); }
        // maybe at the window end, remove possible trailing data that doesn't belong to the
        // current rank.
size_t to_read_file_entry_count = std::min(
file_entry_count, local_entry_file_start_index + local_entry_count - file_entry_offset);
// if in window begin, remove possible data that belongs to previous rank and skip disk
// data.
if (file_entry_offset < local_entry_file_start_index) {
size_t skip_entry_count = local_entry_file_start_index - file_entry_offset;
file_read_start_offset = skip_entry_count * entry_size;
if (fseeko(fp, file_read_start_offset, SEEK_SET) != 0) {
WHOLEMEMORY_ERROR(
"File %s seek to %ld failed.", file_names[i], skip_entry_count * entry_size);
}
to_read_file_entry_count -= skip_entry_count;
}
// now all data in file_entry_count need to be read.
size_t bytes_to_read = to_read_file_entry_count * entry_size;
size_t left_entry_count = to_read_file_entry_count;
while (left_entry_count > 0) {
size_t read_entry_count = std::min(left_entry_count, buffer_entry_count);
int ret = fread(file_read_buffer.data(), entry_size, read_entry_count, fp);
if (ret != read_entry_count) {
WHOLEMEMORY_ERROR(
"File %s line %d: reading from file %s, read_entry_count=%ld, entry_size=%ld, "
"returned %d, error=%s\n",
__FILE__,
__LINE__,
file_names[i],
read_entry_count,
entry_size,
ret,
strerror(errno));
}
if (entry_size != memory_entry_stride) {
WM_CUDA_CHECK(cudaMemcpy2D(local_write_ptr,
memory_entry_stride,
file_read_buffer.data(),
entry_size,
entry_size,
read_entry_count,
cudaMemcpyDefault));
} else {
WM_CUDA_CHECK(cudaMemcpy(local_write_ptr,
file_read_buffer.data(),
read_entry_count * entry_size,
cudaMemcpyDefault));
}
local_write_ptr += read_entry_count * memory_entry_stride;
left_entry_count -= read_entry_count;
}
fclose(fp);
        WHOLEMEMORY_INFO(
          "Rank=%d done reading %ld bytes from file %s (size=%ld), starting from offset=%ld.",
wm_rank,
bytes_to_read,
file_names[i],
file_sizes[i],
file_read_start_offset);
total_read_bytes += bytes_to_read;
}
file_entry_offset += file_entry_count;
}
WHOLEMEMORY_INFO(
"Rank=%d done reading total %ld bytes from needed files.", wm_rank, total_read_bytes);
wm_comm->barrier();
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("Logic error: %s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("CUDA error: %s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
    WHOLEMEMORY_ERROR("Unknown error caught at file %s, line %d", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
wholememory_error_code_t store_handle_to_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t entry_size,
const char* local_file_name) noexcept
{
if (entry_size <= 0 || memory_offset < 0 || memory_offset + entry_size > memory_entry_stride) {
WHOLEMEMORY_ERROR("Invalid input, entry_size=%ld, memory_entry_stride=%ld, memory_offset=%ld",
entry_size,
memory_entry_stride,
memory_offset);
return WHOLEMEMORY_INVALID_INPUT;
}
size_t wm_data_granularity = wholememory_get_data_granularity(wholememory_handle);
if (wm_data_granularity % memory_entry_stride != 0) {
WHOLEMEMORY_ERROR("Invalid input, memory_entry_stride=%ld, but wm_data_granularity=%ld",
memory_entry_stride,
wm_data_granularity);
return WHOLEMEMORY_INVALID_INPUT;
}
try {
wholememory_comm_t wm_comm;
WHOLEMEMORY_CHECK(wholememory_get_communicator(&wm_comm, wholememory_handle) ==
WHOLEMEMORY_SUCCESS);
int wm_rank;
WHOLEMEMORY_CHECK(wholememory_communicator_get_rank(&wm_rank, wm_comm) == WHOLEMEMORY_SUCCESS);
char* local_ptr = nullptr;
size_t local_size, local_offset;
wm_comm->barrier();
WHOLEMEMORY_CHECK(wholememory_get_local_memory(
(void**)(&local_ptr), &local_size, &local_offset, wholememory_handle) ==
WHOLEMEMORY_SUCCESS);
constexpr int kSuggestedBufferSize = 16 * 1024 * 1024;
size_t buffer_size;
size_t buffer_entry_count = 1;
if (kSuggestedBufferSize < entry_size) {
buffer_size = entry_size;
} else {
buffer_entry_count = kSuggestedBufferSize / entry_size;
buffer_size = buffer_entry_count * entry_size;
}
std::vector<char> file_write_buffer(buffer_size);
size_t local_entry_count = local_size / memory_entry_stride;
char* local_write_ptr = local_ptr + memory_offset % memory_entry_stride;
if (wm_rank == 0) {
local_entry_count -= memory_offset / memory_entry_stride;
local_write_ptr += (memory_offset / memory_entry_stride) * memory_entry_stride;
}
FILE* fp = fopen(local_file_name, "wb");
if (fp == nullptr) {
WHOLEMEMORY_ERROR("Rank=%d, open output file %s failed.\n", wm_rank, local_file_name);
}
size_t left_entry_count = local_entry_count;
while (left_entry_count > 0) {
size_t write_entry_count = std::min(left_entry_count, buffer_entry_count);
if (entry_size != memory_entry_stride) {
WM_CUDA_CHECK(cudaMemcpy2D(file_write_buffer.data(),
entry_size,
local_write_ptr,
memory_entry_stride,
entry_size,
write_entry_count,
cudaMemcpyDefault));
} else {
WM_CUDA_CHECK(cudaMemcpy(file_write_buffer.data(),
local_write_ptr,
write_entry_count * entry_size,
cudaMemcpyDefault));
}
local_write_ptr += write_entry_count * memory_entry_stride;
int ret = fwrite(file_write_buffer.data(), entry_size, write_entry_count, fp);
if (ret != write_entry_count) {
WHOLEMEMORY_ERROR(
"File %s line %d: writing to file %s, write_entry_count=%ld, entry_size=%ld, "
"returned %d, error=%s\n",
__FILE__,
__LINE__,
local_file_name,
write_entry_count,
entry_size,
ret,
strerror(errno));
}
left_entry_count -= write_entry_count;
}
fclose(fp);
WHOLEMEMORY_INFO("Rank=%d done writing to file %s.", wm_rank, local_file_name);
wm_comm->barrier();
} catch (wholememory::logic_error& wle) {
WHOLEMEMORY_ERROR("Logic error: %s", wle.what());
return WHOLEMEMORY_LOGIC_ERROR;
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_ERROR("CUDA error: %s", wce.what());
return WHOLEMEMORY_CUDA_ERROR;
} catch (...) {
    WHOLEMEMORY_ERROR("Unknown error caught at file %s, line %d", __FILE__, __LINE__);
return WHOLEMEMORY_UNKNOW_ERROR;
}
return WHOLEMEMORY_SUCCESS;
}
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/initialize.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/wholememory.h>
namespace wholememory {
wholememory_error_code_t init(unsigned int flags) noexcept;
wholememory_error_code_t finalize() noexcept;
/**
 * Return the cudaDeviceProp of dev_id; if dev_id is -1, the current device is used.
* @param dev_id : device id, -1 for current device
* @return : cudaDeviceProp pointer
*/
cudaDeviceProp* get_device_prop(int dev_id) noexcept;
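// Hedged usage sketch (illustrative only):
//   cudaDeviceProp* prop = wholememory::get_device_prop(-1);  // current device
//   /* e.g. inspect prop->multiProcessorCount when sizing kernel launches */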
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/embedding.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/embedding.h>
#include <wholememory/wholememory_tensor.h>
#include "embedding_optimizer.hpp"
#ifdef __cplusplus
extern "C" {
#endif
struct wholememory_embedding_ {
wholememory_tensor_t allocated_embedding = nullptr;
wholememory_tensor_t user_embedding = nullptr; // subtensor of allocated_embedding
wholememory_embedding_cache_policy_t cache_policy = nullptr;
wholememory_embedding_optimizer_t optimizer = nullptr;
};
#ifdef __cplusplus
}
#endif
namespace wholememory {
class embedding_base : public wholememory_embedding_ {
public:
embedding_base() = default;
virtual ~embedding_base() = default;
wholememory_error_code_t allocate(wholememory_matrix_description_t* embedding_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_embedding_cache_policy_t policy,
wholememory_embedding_optimizer_t opt) noexcept;
void deallocate() noexcept;
virtual wholememory_error_code_t gather(wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream) noexcept = 0;
wholememory_error_code_t gather_gradient_apply(wholememory_tensor_t indices,
wholememory_tensor_t grads,
bool adjust_cache,
float lr,
wholememory_env_func_t* p_env_fns,
cudaStream_t stream);
[[nodiscard]] const char* const* get_optimizer_state_names() const noexcept
{
if (optimizer_impl_base_ != nullptr) {
return optimizer_impl_base_->get_optimizer_state_names();
}
return nullptr;
}
virtual wholememory_tensor_t get_optimizer_state(const char* state_name) const noexcept
{
if (optimizer_impl_base_ != nullptr) {
return optimizer_impl_base_->get_optimizer_state(optimizer_state_.get(), state_name);
}
return nullptr;
}
virtual wholememory_error_code_t writeback_embedding_cache(cudaStream_t stream) const noexcept;
virtual wholememory_error_code_t writeback_all_caches(cudaStream_t stream) const noexcept;
virtual wholememory_error_code_t drop_embedding_cache(cudaStream_t stream) const noexcept;
virtual wholememory_error_code_t drop_all_caches(cudaStream_t stream) const noexcept;
wholememory::embedding_cache_base* get_cache_ptr() const { return cache_ptr_; }
protected:
virtual wholememory_error_code_t init_optimizer_states() noexcept
{
if (optimizer_impl_base_ != nullptr) {
WHOLEMEMORY_RETURN_ON_FAIL(
optimizer_impl_base_->init_optimizer_states(optimizer_state_.get()));
WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(raw_embedding_comm_));
return WHOLEMEMORY_SUCCESS;
}
return WHOLEMEMORY_LOGIC_ERROR;
}
wholememory_error_code_t create_optimizer_states() noexcept;
wholememory_error_code_t destroy_optimizer_states() noexcept;
wholememory_comm_t raw_embedding_comm_ = nullptr;
wholememory::embedding_cache_base* cache_ptr_ = nullptr;
wholememory::embedding_optimizer_impl_base* optimizer_impl_base_ = nullptr;
std::unique_ptr<wholememory::optimizer_state_t> optimizer_state_ = nullptr;
};
} // namespace wholememory
| 0 |
rapidsai_public_repos/wholegraph/cpp/src
|
rapidsai_public_repos/wholegraph/cpp/src/wholememory/env_func_ptrs.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wholememory/env_func_ptrs.hpp>
#include <memory>
#include <mutex>
#include <queue>
#include <vector>
#include "cuda_macros.hpp"
#include "error.hpp"
#include "initialize.hpp"
namespace wholememory {
void default_create_memory_context_func(void** memory_context, void* /*global_context*/)
{
auto* default_memory_context = new default_memory_context_t;
wholememory_initialize_tensor_desc(&default_memory_context->desc);
default_memory_context->ptr = nullptr;
default_memory_context->allocation_type = WHOLEMEMORY_MA_NONE;
*memory_context = default_memory_context;
}
void default_destroy_memory_context_func(void* memory_context, void* /*global_context*/)
{
auto* default_memory_context = static_cast<default_memory_context_t*>(memory_context);
delete default_memory_context;
}
void* default_malloc_func(wholememory_tensor_description_t* tensor_description,
wholememory_memory_allocation_type_t memory_allocation_type,
void* memory_context,
void* /*global_context*/)
{
auto* default_memory_context = static_cast<default_memory_context_t*>(memory_context);
void* ptr = nullptr;
try {
if (memory_allocation_type == WHOLEMEMORY_MA_HOST) {
ptr = malloc(wholememory_get_memory_size_from_tensor(tensor_description));
if (ptr == nullptr) { WHOLEMEMORY_FAIL_NOTHROW("malloc returned nullptr.\n"); }
} else if (memory_allocation_type == WHOLEMEMORY_MA_PINNED) {
WM_CUDA_CHECK(
cudaMallocHost(&ptr, wholememory_get_memory_size_from_tensor(tensor_description)));
} else if (memory_allocation_type == WHOLEMEMORY_MA_DEVICE) {
WM_CUDA_CHECK(cudaMalloc(&ptr, wholememory_get_memory_size_from_tensor(tensor_description)));
} else {
WHOLEMEMORY_FAIL_NOTHROW("memory_allocation_type incorrect.\n");
}
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("cudaMalloc failed, %s.\n", wce.what());
}
default_memory_context->desc = *tensor_description;
default_memory_context->ptr = ptr;
default_memory_context->allocation_type = memory_allocation_type;
return ptr;
}
void default_free_func(void* memory_context, void* /*global_context*/)
{
auto* default_memory_context = static_cast<default_memory_context_t*>(memory_context);
auto memory_allocation_type = default_memory_context->allocation_type;
if (memory_allocation_type == WHOLEMEMORY_MA_HOST) {
free(default_memory_context->ptr);
} else if (memory_allocation_type == WHOLEMEMORY_MA_PINNED) {
WM_CUDA_CHECK(cudaFreeHost(default_memory_context->ptr));
} else if (memory_allocation_type == WHOLEMEMORY_MA_DEVICE) {
WM_CUDA_CHECK(cudaFree(default_memory_context->ptr));
} else {
WHOLEMEMORY_FAIL_NOTHROW("memory_allocation_type incorrect.\n");
}
wholememory_initialize_tensor_desc(&default_memory_context->desc);
default_memory_context->ptr = nullptr;
default_memory_context->allocation_type = WHOLEMEMORY_MA_NONE;
}
static wholememory_env_func_t default_env_func = {
.temporary_fns =
{
.create_memory_context_fn = default_create_memory_context_func,
.destroy_memory_context_fn = default_destroy_memory_context_func,
.malloc_fn = default_malloc_func,
.free_fn = default_free_func,
.global_context = nullptr,
},
.output_fns = {
.malloc_fn = default_malloc_func,
.free_fn = default_free_func,
.global_context = nullptr,
}};
wholememory_env_func_t* get_default_env_func() { return &default_env_func; }
class ChunkedMemoryPool {
public:
ChunkedMemoryPool();
~ChunkedMemoryPool();
void* CachedMalloc(size_t size);
void CachedFree(void* ptr, size_t size);
void EmptyCache();
virtual void* MallocFnImpl(size_t size) = 0;
virtual void FreeFnImpl(void* ptr) = 0;
private:
static constexpr int kBucketCount = 64;
std::vector<std::unique_ptr<std::mutex>> mutexes_;
std::vector<std::queue<void*>> sized_pool_;
};
static size_t GetChunkIndex(size_t size)
{
if (size == 0) return 0;
int power = 0;
size_t shifted_size = size;
while (shifted_size) {
shifted_size >>= 1;
power++;
}
if ((size & (size - 1)) == 0) {
return power - 1;
} else {
return power;
}
}
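// Worked example (illustrative): GetChunkIndex(4096) returns 12 because 4096 is a power of
// two, so the request maps to the 4096-byte bucket, while GetChunkIndex(5000) returns 13,
// so CachedMalloc rounds that request up to an 8192-byte block before calling MallocFnImpl.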
ChunkedMemoryPool::ChunkedMemoryPool()
{
sized_pool_.resize(kBucketCount);
mutexes_.resize(kBucketCount);
for (int i = 0; i < kBucketCount; i++) {
mutexes_[i] = std::make_unique<std::mutex>();
}
}
ChunkedMemoryPool::~ChunkedMemoryPool() {}
void* ChunkedMemoryPool::CachedMalloc(size_t size)
{
size_t chunked_index = GetChunkIndex(size);
std::unique_lock<std::mutex> mlock(*mutexes_[chunked_index]);
if (!sized_pool_[chunked_index].empty()) {
void* ptr = sized_pool_[chunked_index].front();
sized_pool_[chunked_index].pop();
return ptr;
} else {
return MallocFnImpl(1ULL << chunked_index);
}
return nullptr;
}
void ChunkedMemoryPool::CachedFree(void* ptr, size_t size)
{
size_t chunked_index = GetChunkIndex(size);
std::unique_lock<std::mutex> mlock(*mutexes_[chunked_index]);
sized_pool_[chunked_index].push(ptr);
}
void ChunkedMemoryPool::EmptyCache()
{
for (int i = 0; i < kBucketCount; i++) {
std::unique_lock<std::mutex> mlock(*mutexes_[i]);
while (!sized_pool_[i].empty()) {
FreeFnImpl(sized_pool_[i].front());
sized_pool_[i].pop();
}
}
}
class DeviceChunkedMemoryPool : public ChunkedMemoryPool {
public:
explicit DeviceChunkedMemoryPool(int device_id);
~DeviceChunkedMemoryPool();
void* MallocFnImpl(size_t size) override;
void FreeFnImpl(void* ptr) override;
protected:
int device_id_ = -1;
};
DeviceChunkedMemoryPool::DeviceChunkedMemoryPool(int device_id) : device_id_(device_id) {}
DeviceChunkedMemoryPool::~DeviceChunkedMemoryPool() {}
void* DeviceChunkedMemoryPool::MallocFnImpl(size_t size)
{
int old_dev;
void* ptr;
WM_CUDA_CHECK(cudaGetDevice(&old_dev));
WM_CUDA_CHECK(cudaSetDevice(device_id_));
WM_CUDA_CHECK(cudaMalloc(&ptr, size));
WM_CUDA_CHECK(cudaSetDevice(old_dev));
return ptr;
}
void DeviceChunkedMemoryPool::FreeFnImpl(void* ptr)
{
int old_dev;
WM_CUDA_CHECK(cudaGetDevice(&old_dev));
WM_CUDA_CHECK(cudaSetDevice(device_id_));
WM_CUDA_CHECK(cudaFree(ptr));
WM_CUDA_CHECK(cudaSetDevice(old_dev));
}
class PinnedChunkedMemoryPool : public ChunkedMemoryPool {
public:
PinnedChunkedMemoryPool() = default;
~PinnedChunkedMemoryPool() = default;
void* MallocFnImpl(size_t size) override;
void FreeFnImpl(void* ptr) override;
};
void* PinnedChunkedMemoryPool::MallocFnImpl(size_t size)
{
void* ptr;
WM_CUDA_CHECK(cudaMallocHost(&ptr, size));
return ptr;
}
void PinnedChunkedMemoryPool::FreeFnImpl(void* ptr) { WM_CUDA_CHECK(cudaFreeHost(ptr)); }
class HostChunkedMemoryPool : public ChunkedMemoryPool {
public:
HostChunkedMemoryPool() = default;
~HostChunkedMemoryPool() = default;
void* MallocFnImpl(size_t size) override;
void FreeFnImpl(void* ptr) override;
};
void* HostChunkedMemoryPool::MallocFnImpl(size_t size) { return malloc(size); }
void HostChunkedMemoryPool::FreeFnImpl(void* ptr) { free(ptr); }
class CachedAllocator {
public:
void* MallocHost(size_t size);
void* MallocDevice(size_t size);
void* MallocPinned(size_t size);
void FreeHost(void* ptr, size_t size);
void FreeDevice(void* ptr, size_t size);
void FreePinned(void* ptr, size_t size);
void DropCaches();
static CachedAllocator* GetInst();
private:
CachedAllocator()
{
device_chunked_mem_pools_.resize(kMaxSupportedDeviceCount);
for (int i = 0; i < kMaxSupportedDeviceCount; i++) {
device_chunked_mem_pools_[i] = std::make_unique<DeviceChunkedMemoryPool>(i);
}
pinned_chunked_mem_pool_ = std::make_unique<PinnedChunkedMemoryPool>();
host_chunked_mem_pool_ = std::make_unique<HostChunkedMemoryPool>();
}
~CachedAllocator() {}
CachedAllocator(const CachedAllocator& ca) = delete;
const CachedAllocator& operator=(const CachedAllocator& ca) = delete;
static CachedAllocator ca_inst_;
std::vector<std::unique_ptr<DeviceChunkedMemoryPool>> device_chunked_mem_pools_;
std::unique_ptr<PinnedChunkedMemoryPool> pinned_chunked_mem_pool_;
std::unique_ptr<HostChunkedMemoryPool> host_chunked_mem_pool_;
static constexpr int kMaxSupportedDeviceCount = 16;
};
CachedAllocator CachedAllocator::ca_inst_;
CachedAllocator* CachedAllocator::GetInst() { return &ca_inst_; }
void* CachedAllocator::MallocHost(size_t size)
{
return host_chunked_mem_pool_->CachedMalloc(size);
}
void CachedAllocator::FreeHost(void* ptr, size_t size)
{
host_chunked_mem_pool_->CachedFree(ptr, size);
}
void* CachedAllocator::MallocDevice(size_t size)
{
int dev_id;
WM_CUDA_CHECK(cudaGetDevice(&dev_id));
return device_chunked_mem_pools_[dev_id]->CachedMalloc(size);
}
void CachedAllocator::FreeDevice(void* ptr, size_t size)
{
int dev_id;
WM_CUDA_CHECK(cudaGetDevice(&dev_id));
device_chunked_mem_pools_[dev_id]->CachedFree(ptr, size);
}
void* CachedAllocator::MallocPinned(size_t size)
{
return pinned_chunked_mem_pool_->CachedMalloc(size);
}
void CachedAllocator::FreePinned(void* ptr, size_t size)
{
pinned_chunked_mem_pool_->CachedFree(ptr, size);
}
void CachedAllocator::DropCaches()
{
for (int i = 0; i < kMaxSupportedDeviceCount; i++) {
device_chunked_mem_pools_[i]->EmptyCache();
}
pinned_chunked_mem_pool_->EmptyCache();
host_chunked_mem_pool_->EmptyCache();
}
void* cached_malloc_func(wholememory_tensor_description_t* tensor_description,
wholememory_memory_allocation_type_t memory_allocation_type,
void* memory_context,
void* /*global_context*/)
{
auto* default_memory_context = static_cast<default_memory_context_t*>(memory_context);
void* ptr = nullptr;
CachedAllocator* cached_inst = CachedAllocator::GetInst();
int devid;
WM_CUDA_CHECK(cudaGetDevice((&devid)));
try {
if (memory_allocation_type == WHOLEMEMORY_MA_HOST) {
ptr = cached_inst->MallocHost(wholememory_get_memory_size_from_tensor(tensor_description));
if (ptr == nullptr) { WHOLEMEMORY_FAIL_NOTHROW("cached malloc host returned nullptr.\n"); }
} else if (memory_allocation_type == WHOLEMEMORY_MA_PINNED) {
ptr = cached_inst->MallocPinned(wholememory_get_memory_size_from_tensor(tensor_description));
if (ptr == nullptr) { WHOLEMEMORY_FAIL_NOTHROW("cached malloc pinned returned nullptr.\n"); }
} else if (memory_allocation_type == WHOLEMEMORY_MA_DEVICE) {
ptr = cached_inst->MallocDevice(wholememory_get_memory_size_from_tensor(tensor_description));
if (ptr == nullptr) { WHOLEMEMORY_FAIL_NOTHROW("cached malloc device returned nullptr.\n"); }
} else {
WHOLEMEMORY_FAIL_NOTHROW("memory_allocation_type incorrect.\n");
}
} catch (wholememory::cuda_error& wce) {
WHOLEMEMORY_FAIL_NOTHROW("cudaMalloc failed, %s.\n", wce.what());
}
default_memory_context->desc = *tensor_description;
default_memory_context->ptr = ptr;
default_memory_context->allocation_type = memory_allocation_type;
return ptr;
}
void cached_free_func(void* memory_context, void* /*global_context*/)
{
CachedAllocator* cached_inst = CachedAllocator::GetInst();
auto* default_memory_context = static_cast<default_memory_context_t*>(memory_context);
auto memory_allocation_type = default_memory_context->allocation_type;
if (memory_allocation_type == WHOLEMEMORY_MA_HOST) {
cached_inst->FreeHost(default_memory_context->ptr,
wholememory_get_memory_size_from_tensor(&default_memory_context->desc));
} else if (memory_allocation_type == WHOLEMEMORY_MA_PINNED) {
cached_inst->FreePinned(default_memory_context->ptr,
wholememory_get_memory_size_from_tensor(&default_memory_context->desc));
} else if (memory_allocation_type == WHOLEMEMORY_MA_DEVICE) {
cached_inst->FreeDevice(default_memory_context->ptr,
wholememory_get_memory_size_from_tensor(&default_memory_context->desc));
} else {
WHOLEMEMORY_FAIL_NOTHROW("memory_allocation_type incorrect.\n");
}
wholememory_initialize_tensor_desc(&default_memory_context->desc);
default_memory_context->ptr = nullptr;
default_memory_context->allocation_type = WHOLEMEMORY_MA_NONE;
}
static wholememory_env_func_t cached_env_func = {
.temporary_fns =
{
.create_memory_context_fn = default_create_memory_context_func,
.destroy_memory_context_fn = default_destroy_memory_context_func,
.malloc_fn = cached_malloc_func,
.free_fn = cached_free_func,
.global_context = nullptr,
},
.output_fns = {
.malloc_fn = cached_malloc_func,
.free_fn = cached_free_func,
.global_context = nullptr,
}};
wholememory_env_func_t* get_cached_env_func() { return &cached_env_func; }
void drop_cached_env_func_cache() { CachedAllocator::GetInst()->DropCaches(); }
} // namespace wholememory
#ifdef __cplusplus
extern "C" {
#endif
cudaDeviceProp* get_device_prop(int dev_id) { return wholememory::get_device_prop(dev_id); }
#ifdef __cplusplus
}
#endif
| 0 |