hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
eb9ed6f1da9a32328097ae86857c2d669a65873e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceSelect::Unique utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include <cub/iterator/counting_input_iterator.cuh>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/system/hip/detail/core/triple_chevron_launch.h>
#include "test_util.h"
#include <cstdio>
#include <typeinfo>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to unique entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items);
}
return error;
}
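/*
 * Note: the test driver below calls this Dispatch entrypoint twice because
 * DeviceSelect::Unique follows CUB's usual two-call workspace idiom (the first
 * call only queries the temp-storage size, the second does the work).
 * A minimal sketch of that idiom, assuming d_in, d_out and d_num_selected_out
 * are already-allocated device pointers:
 *
 *   void   *d_temp_storage     = NULL;
 *   size_t  temp_storage_bytes = 0;
 *   // First call: d_temp_storage == NULL, so only temp_storage_bytes is written.
 *   DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
 *                        d_in, d_out, d_num_selected_out, num_items);
 *   CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
 *   // Second call: performs the actual selection.
 *   DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
 *                        d_in, d_out, d_num_selected_out, num_items);
 */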
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
#if TEST_CDP == 1
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename NumSelectedIteratorT,
typename OffsetT>
__global__ void CDPDispatchKernel(Int2Type<CubBackend> cub_backend,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void *d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
*d_cdp_error = Dispatch(cub_backend,
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
d_num_selected_out,
num_items);
*d_temp_storage_bytes = temp_storage_bytes;
}
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT,
typename OutputIteratorT,
typename NumSelectedIteratorT,
typename OffsetT>
hipError_t Dispatch(Int2Type<CDP> /*dispatch_to*/,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void *d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
// Invoke kernel to invoke device-side dispatch
hipError_t retval =
thrust::cuda_cub::launcher::triple_chevron(1, 1, 0, 0)
.doit(CDPDispatchKernel<CUB,
InputIteratorT,
OutputIteratorT,
NumSelectedIteratorT,
OffsetT>,
Int2Type<CUB>{},
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
d_num_selected_out,
num_items);
CubDebugExit(retval);
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes,
d_temp_storage_bytes,
sizeof(size_t) * 1,
hipMemcpyDeviceToHost));
// Copy out error
CubDebugExit(hipMemcpy(&retval,
d_cdp_error,
sizeof(hipError_t) * 1,
hipMemcpyDeviceToHost));
return retval;
}
#endif // TEST_CDP
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
int entropy_reduction,
T *h_in,
int num_items,
int max_segment)
{
unsigned int max_int = (unsigned int) -1;
int key = 0;
int i = 0;
while (i < num_items)
{
// Select number of repeating occurrences for the current run
int repeat;
if (max_segment < 0)
{
repeat = num_items;
}
else if (max_segment < 2)
{
repeat = 1;
}
else
{
RandomBits(repeat, entropy_reduction);
repeat = (int) ((double(repeat) * double(max_segment)) / double(max_int));
repeat = CUB_MAX(1, repeat);
}
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
InitValue(INTEGER_SEED, h_in[j], key);
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve unique problem
*/
template <
typename InputIteratorT,
typename T>
int Solve(
InputIteratorT h_in,
T *h_reference,
int num_items)
{
int num_selected = 0;
if (num_items > 0)
{
h_reference[num_selected] = h_in[0];
num_selected++;
}
for (int i = 1; i < num_items; ++i)
{
if (h_in[i] != h_in[i - 1])
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
}
return num_selected;
}
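// Example: for h_in = {0, 0, 1, 1, 1, 2, 0} this writes h_reference = {0, 1, 2, 0}
// and returns num_selected = 4; only adjacent duplicates are dropped, matching the
// std::unique-style semantics that DeviceSelect::Unique is checked against below.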
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
typename DeviceInputIteratorT,
typename T>
void Test(
DeviceInputIteratorT d_in,
T *h_reference,
int num_selected,
int num_items)
{
// Allocate device output array and num selected
T *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(Int2Type<BACKEND>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Clear device output array
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(hipMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
// Check for correctness (and display results, if specified)
int compare1 = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s ", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = float((num_items + num_selected) * sizeof(T)) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
/**
* Test DeviceSelect on pointer type
*/
template <
Backend BACKEND,
typename T>
void TestPointer(
int num_items,
int entropy_reduction,
int max_segment)
{
// Allocate host arrays
T* h_in = new T[num_items];
T* h_reference = new T[num_items];
// Initialize problem and solution
Initialize(entropy_reduction, h_in, num_items, max_segment);
int num_selected = Solve(h_in, h_reference, num_items);
printf("\nPointer %s hipcub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements, entropy_reduction %d\n",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
num_items, num_selected, float(num_items) / num_selected,
typeid(T).name(),
(int) sizeof(T),
entropy_reduction);
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
// Run Test
Test<BACKEND>(d_in, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/**
* Test DeviceSelect on iterator type
*/
template <
Backend BACKEND,
typename T>
void TestIterator(
int num_items)
{
// Use a counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Allocate host arrays
T* h_reference = new T[num_items];
// Initialize problem and solution
int num_selected = Solve(h_in, h_reference, num_items);
printf("\nIterator %s hipcub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements\n",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
num_items, num_selected, float(num_items) / num_selected,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND>(h_in, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
}
/**
* Test different gen modes
*/
template <
Backend BACKEND,
typename T>
void Test(
int num_items)
{
for (int max_segment = 1; ((max_segment > 0) && (max_segment < num_items)); max_segment *= 11)
{
TestPointer<BACKEND, T>(num_items, 0, max_segment);
TestPointer<BACKEND, T>(num_items, 2, max_segment);
TestPointer<BACKEND, T>(num_items, 7, max_segment);
}
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
#if TEST_CDP == 0
Test<CUB, T>(num_items);
#elif TEST_CDP == 1
Test<CDP, T>(num_items);
#endif // TEST_CDP
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
template <typename T>
void TestIteratorOp(int num_items)
{
void *d_temp_storage{};
std::size_t temp_storage_size{};
thrust::device_vector<int> num_selected(1);
auto in = thrust::make_counting_iterator(static_cast<T>(0));
auto out = thrust::make_discard_iterator();
CubDebugExit(hipcub::DeviceSelect::Unique(d_temp_storage,
temp_storage_size,
in,
out,
num_selected.begin(),
num_items));
thrust::device_vector<char> temp_storage(temp_storage_size);
d_temp_storage = thrust::raw_pointer_cast(temp_storage.data());
CubDebugExit(hipcub::DeviceSelect::Unique(d_temp_storage,
temp_storage_size,
in,
out,
num_selected.begin(),
num_items));
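// The counting iterator produces strictly increasing, all-distinct values, so
// Unique is expected to select every one of the num_items inputs.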
AssertEquals(num_selected[0], num_items);
}
template <typename T>
void TestIterator(int num_items)
{
if (num_items < 0)
{
TestIteratorOp<T>(0);
TestIteratorOp<T>(1);
TestIteratorOp<T>(100);
TestIteratorOp<T>(10000);
TestIteratorOp<T>(1000000);
}
else
{
TestIteratorOp<T>(num_items);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
int entropy_reduction = 0;
int maxseg = 1000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("maxseg", maxseg);
args.GetCmdLineArgument("entropy", entropy_reduction);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--entropy=<segment length bit entropy reduction rounds>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
// %PARAM% TEST_CDP cdp 0:1
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
TestIterator<int>(num_items);
return 0;
}
| eb9ed6f1da9a32328097ae86857c2d669a65873e.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceSelect::Unique utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <cub/device/device_select.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/util_allocator.cuh>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/system/cuda/detail/core/triple_chevron_launch.h>
#include "test_util.h"
#include <cstdio>
#include <typeinfo>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to unique entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items);
}
return error;
}
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
#if TEST_CDP == 1
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename NumSelectedIteratorT,
typename OffsetT>
__global__ void CDPDispatchKernel(Int2Type<CubBackend> cub_backend,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void *d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
*d_cdp_error = Dispatch(cub_backend,
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
d_num_selected_out,
num_items);
*d_temp_storage_bytes = temp_storage_bytes;
}
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT,
typename OutputIteratorT,
typename NumSelectedIteratorT,
typename OffsetT>
cudaError_t Dispatch(Int2Type<CDP> /*dispatch_to*/,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void *d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items)
{
// Invoke kernel to invoke device-side dispatch
cudaError_t retval =
thrust::cuda_cub::launcher::triple_chevron(1, 1, 0, 0)
.doit(CDPDispatchKernel<CUB,
InputIteratorT,
OutputIteratorT,
NumSelectedIteratorT,
OffsetT>,
Int2Type<CUB>{},
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
d_num_selected_out,
num_items);
CubDebugExit(retval);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes,
d_temp_storage_bytes,
sizeof(size_t) * 1,
cudaMemcpyDeviceToHost));
// Copy out error
CubDebugExit(cudaMemcpy(&retval,
d_cdp_error,
sizeof(cudaError_t) * 1,
cudaMemcpyDeviceToHost));
return retval;
}
#endif // TEST_CDP
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
int entropy_reduction,
T *h_in,
int num_items,
int max_segment)
{
unsigned int max_int = (unsigned int) -1;
int key = 0;
int i = 0;
while (i < num_items)
{
// Select number of repeating occurrences for the current run
int repeat;
if (max_segment < 0)
{
repeat = num_items;
}
else if (max_segment < 2)
{
repeat = 1;
}
else
{
RandomBits(repeat, entropy_reduction);
repeat = (int) ((double(repeat) * double(max_segment)) / double(max_int));
repeat = CUB_MAX(1, repeat);
}
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
InitValue(INTEGER_SEED, h_in[j], key);
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve unique problem
*/
template <
typename InputIteratorT,
typename T>
int Solve(
InputIteratorT h_in,
T *h_reference,
int num_items)
{
int num_selected = 0;
if (num_items > 0)
{
h_reference[num_selected] = h_in[0];
num_selected++;
}
for (int i = 1; i < num_items; ++i)
{
if (h_in[i] != h_in[i - 1])
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
}
return num_selected;
}
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
typename DeviceInputIteratorT,
typename T>
void Test(
DeviceInputIteratorT d_in,
T *h_reference,
int num_selected,
int num_items)
{
// Allocate device output array and num selected
T *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(Int2Type<BACKEND>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Clear device output array
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
// Check for correctness (and display results, if specified)
int compare1 = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s ", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = float((num_items + num_selected) * sizeof(T)) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
/**
* Test DeviceSelect on pointer type
*/
template <
Backend BACKEND,
typename T>
void TestPointer(
int num_items,
int entropy_reduction,
int max_segment)
{
// Allocate host arrays
T* h_in = new T[num_items];
T* h_reference = new T[num_items];
// Initialize problem and solution
Initialize(entropy_reduction, h_in, num_items, max_segment);
int num_selected = Solve(h_in, h_reference, num_items);
printf("\nPointer %s cub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements, entropy_reduction %d\n",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
num_items, num_selected, float(num_items) / num_selected,
typeid(T).name(),
(int) sizeof(T),
entropy_reduction);
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
// Run Test
Test<BACKEND>(d_in, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/**
* Test DeviceSelect on iterator type
*/
template <
Backend BACKEND,
typename T>
void TestIterator(
int num_items)
{
// Use a counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Allocate host arrays
T* h_reference = new T[num_items];
// Initialize problem and solution
int num_selected = Solve(h_in, h_reference, num_items);
printf("\nIterator %s cub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements\n",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
num_items, num_selected, float(num_items) / num_selected,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND>(h_in, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
}
/**
* Test different gen modes
*/
template <
Backend BACKEND,
typename T>
void Test(
int num_items)
{
for (int max_segment = 1; ((max_segment > 0) && (max_segment < num_items)); max_segment *= 11)
{
TestPointer<BACKEND, T>(num_items, 0, max_segment);
TestPointer<BACKEND, T>(num_items, 2, max_segment);
TestPointer<BACKEND, T>(num_items, 7, max_segment);
}
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
#if TEST_CDP == 0
Test<CUB, T>(num_items);
#elif TEST_CDP == 1
Test<CDP, T>(num_items);
#endif // TEST_CDP
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
template <typename T>
void TestIteratorOp(int num_items)
{
void *d_temp_storage{};
std::size_t temp_storage_size{};
thrust::device_vector<int> num_selected(1);
auto in = thrust::make_counting_iterator(static_cast<T>(0));
auto out = thrust::make_discard_iterator();
CubDebugExit(cub::DeviceSelect::Unique(d_temp_storage,
temp_storage_size,
in,
out,
num_selected.begin(),
num_items));
thrust::device_vector<char> temp_storage(temp_storage_size);
d_temp_storage = thrust::raw_pointer_cast(temp_storage.data());
CubDebugExit(cub::DeviceSelect::Unique(d_temp_storage,
temp_storage_size,
in,
out,
num_selected.begin(),
num_items));
AssertEquals(num_selected[0], num_items);
}
template <typename T>
void TestIterator(int num_items)
{
if (num_items < 0)
{
TestIteratorOp<T>(0);
TestIteratorOp<T>(1);
TestIteratorOp<T>(100);
TestIteratorOp<T>(10000);
TestIteratorOp<T>(1000000);
}
else
{
TestIteratorOp<T>(num_items);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
int entropy_reduction = 0;
int maxseg = 1000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("maxseg", maxseg);
args.GetCmdLineArgument("entropy", entropy_reduction);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--entropy=<segment length bit entropy reduction rounds>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
// %PARAM% TEST_CDP cdp 0:1
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
TestIterator<int>(num_items);
return 0;
}
|
49d985feb821c8d4e3ac3de97ff736d734efa093.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
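// The minimum_plus benchmarks below time an SRGEMM over the min-plus (tropical)
// semiring: cuasr::minimum acts as the reduction ("add") and cuasr::plus as the
// combine ("multiply"), i.e. each kernel computes C(i,j) = min over k of (A(i,k) + B(k,j)).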
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
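// Tile arithmetic: (2 x 4 elements/thread) * (4 x 8 threads/warp) = 8 x 32 per warp,
// and with 1 x 1 warps/block that gives the 8 x 32 x 8 threadblock shape used below.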
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
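// 2*N^3 counts one semiring "multiply" (plus) and one semiring "add" (minimum)
// per term of the N*N inner products of length N.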
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
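////////////////////////////////////////////////////////////////////////////////
// Usage note: the always-on (CUASR_BENCH_LEVEL >= 0) cases, such as the
// 128x128x8_64x32x1 config above, can be selected at run time with Google
// Benchmark's standard --benchmark_filter flag, e.g.
//   ./<bench-binary> --benchmark_filter='.*128x128x8_64x32x1.*'
// where <bench-binary> is illustrative and depends on the build target this
// file is compiled into.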
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
// 49d985feb821c8d4e3ac3de97ff736d734efa093.cu
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_ssrgemm_tn_n_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
bd2c06e241ac3c4f9f6ec02bded0b9d7886ac673.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void)
{
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
int nElem = 6;
dim3 block(3);
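// round the grid size up (ceil division) so every one of the nElem elements gets a thread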
dim3 grid((nElem + block.x - 1) / block.x);
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, );
CHECK(hipDeviceReset());
return(0);
}
| bd2c06e241ac3c4f9f6ec02bded0b9d7886ac673.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void)
{
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
int nElem = 6;
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
checkIndex<<<grid, block>>>();
CHECK(cudaDeviceReset());
return(0);
}
|
9f674e4a4fd6e7b5b8acb694d891109d8c3226f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>
#include <conio.h>
#include "shifts.h"
#include "Constants.h"
unsigned int
ZERO_MEMORY[MAX],
KKK[33] = { 3, 5, 17, 23, 27, 29, 39, 47, 57, 65, 71, 75, 77, 83, 93, 99, 105, 107, 113, 117, 129, 135, 143, 149, 153, 159, 167, 173, 185, 189, 195, 199, 203 },
HHH[33] = { 285, 297, 299, 303, 309, 315, 323, 327, 329, 339, 353, 359, 363, 365, 369, 383, 387, 395, 413, 419, 429, 437, 453, 465, 467, 479, 483, 485, 489, 497, 507, 509, 513 },
Base[THREAD_NUMBER],
Base_[THREAD_NUMBER],
N_Me,
Me,
InverseByModuleM_Me,
InverseByModuleMMMe,
input[THREAD_NUMBER],
input_[THREAD_NUMBER],
N[THREAD_NUMBER] ,
N_[THREAD_NUMBER] ,
Mi[THREAD_NUMBER][MAX], // Large
Mi_[THREAD_NUMBER][MAX], // Large
M[ MAX ], //
M_[ MAX ], //
$N[MAX],
MiInv[THREAD_NUMBER] ,
Mi_Inv[THREAD_NUMBER] ,
SQR_M_MOD_N_RNS[THREAD_NUMBER],
SQR_M_MOD_N_RNS_[THREAD_NUMBER] ,
InverseByModuleMM_[THREAD_NUMBER] ,
InverseByModuleNM[THREAD_NUMBER] ,
Mi_InMe[THREAD_NUMBER] ,
MiInMe [THREAD_NUMBER],
M_InBase[THREAD_NUMBER],
MiInBase_[THREAD_NUMBER][THREAD_NUMBER],
Mi_InBase[THREAD_NUMBER][THREAD_NUMBER],
ABmodN[2][THREAD_NUMBER];
bool power[ MAX * INT_SIZE ];
bool e[ MAX * INT_SIZE ];
bool d[ MAX * INT_SIZE ];
int iterationsCounter,
lastValueOfStateInformation,
currentOperationIndex,
eBitsCount,
numberOfIterationsForE,
dBitsCount,
numberOfIterationsForD;
int profiler_Inversions = 0,
profiler_extended_euclid_Iterations[1000000],
profiler_eeI_counter = -1;
void clearLongVariable ( unsigned int *a ) {
memcpy( a, ZERO_MEMORY, MAX * sizeof(int) );
}
void copyVariable ( unsigned int *source, unsigned int *dest ) {
memcpy( dest, source, MAX * sizeof(int) );
}
void convertToNormalForm (char input[], unsigned int *a){
clearLongVariable (a);
int temp = 0;
for( int i = MAX - 1,
j = (int)strlen(input) - 1,
p = 0;
j >= 0; j-- ) {
if ( input[j] == 32 ) { continue; } // spaces are allowed for convenience
else if ( toupper(input[j]) == 65 ) { temp |= 0xA << p; }
else if ( toupper(input[j]) == 66 ) { temp |= 0xB << p; }
else if ( toupper(input[j]) == 67 ) { temp |= 0xC << p; }
else if ( toupper(input[j]) == 68 ) { temp |= 0xD << p; }
else if ( toupper(input[j]) == 69 ) { temp |= 0xE << p; }
else if ( toupper(input[j]) == 70 ) { temp |= 0xF << p; }
else { temp |= ( toupper(input[j]) - 48 ) << p; }
p+=4;
if( p >= 32 || j == 0){ p = 0; a[i] = temp; temp = 0; i--; }
}
}
void convertFromNormalForm (char output[], unsigned int *a) {
int temp;
int j = ( MAX * 8 ) - 1;
output [ j + 1 ] = '\0';
for( int i = MAX - 1; i >= 0; i-- ) {
int mask = 0xF;
for ( int p = 0; p < 8; p++) {
temp = a[ i ] ;
temp >>= 4*p;
temp &= mask ;
output [ j ] = ( temp > 9 ) ? ( temp + 55 ) : ( temp + 48 );
j--;
}
}
}
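// convertPowerToArrayOfBits (below) unpacks an exponent into a bit array used by the
// left-to-right square-and-multiply loops in main(); the returned totalIterationsNeeded
// works out to 2 * bit_length + 2 * popcount, i.e. two MM() calls per squaring step plus
// two more for every set exponent bit, and is used only for the progress display.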
void convertPowerToArrayOfBits( unsigned int* $power, bool *power, int *bin_digits_in_power, int *totalIterationsNeeded ){
int c = 0;
for(int i = MAX * INT_SIZE - 1, bit; i > 0 ; i--){
bit = shiftToRightVariable($power);
if (bit == 1){
*bin_digits_in_power = MAX * INT_SIZE - i;
c++;
}
power[i] = (bit == 1) ? true : false;
}
c *= 2;
c += 2 * (*bin_digits_in_power);
*totalIterationsNeeded = c;
}
void add ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
int CF = 0;
for (int i = MAX - 1; i >= 0; i--) {
// carry-safe: do the limb addition in 64 bits so the a[ i ] == b[ i ] == 0xFFFFFFFF,
// CF == 1 case cannot drop the carry
unsigned long long s = (unsigned long long)a[ i ] + b[ i ] + CF;
c[ i ] = (unsigned int)s;
CF = (int)(s >> 32);
}
}
void sub ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int tempResult [MAX];
clearLongVariable ( tempResult );
int CF = 0;
for (int i = MAX - 1; i >= 0; i--) {
tempResult[i] = a[i] - b[i] - CF;
// borrow-safe: compare against b[i] + CF in 64 bits, so b[i] == 0xFFFFFFFF needs no special case
CF = ( (unsigned long long)a[i] < (unsigned long long)b[i] + CF ) ? 1 : 0;
}
copyVariable ( tempResult, c );
}
//
// 1 a > b
// 0 a = b
// -1 a < b
// signed comparison of numbers
int cmp (unsigned int *a, unsigned int *b)
{
bool a_positive = !(a[0] & 0x80000000);
bool b_positive = !(b[0] & 0x80000000);
// 1. + +
// 1. + -
// 1. - +
// 1. - -
if ( a_positive && b_positive ){
for (int i = 0; i < MAX; i++) {
if ( a[ i ] > b[ i ] ) {
return 1;
}
if ( a[ i ] < b[ i ] ) {
return -1;
}
}
return 0;
} else if(a_positive && !b_positive) {
return 1;
} else if(!a_positive && b_positive) {
return -1;
} else { // two numbers are negative:
for (int i = 0; i < MAX; i++) {
if ( a[ i ] > b[ i ] ) {
return 1;
}
if ( a[ i ] < b[ i ] ) {
return -1;
}
}
return 0;
}
}
void mod ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned int R[ MAX ]; // remainder
copyVariable ( a, temp_A );
if ( cmp ( temp_A, b) <= 0) {
copyVariable ( temp_A, c );
return;
}
clearLongVariable ( R ); // initialize remainder to zero
// Integer division (unsigned) with remainder
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R ); // left-shift R by 1 bit
R[ MAX - 1] |= shiftToLeftVariable ( temp_A ); // set the least-significant bit of R equal to bit i of the numerator
if ( cmp( R, b ) >= 0 ) {
sub ( R, b, R );
}
}
copyVariable ( R, c );
}
void mod_ ( unsigned int *a,
unsigned int b,
unsigned int* c)
{
unsigned int temp_A[ MAX ];
unsigned long long R; // remainder
copyVariable ( a, temp_A );
R = 0;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
R <<= 1;
R |= shiftToLeftVariable ( temp_A );
if ( R >= b ) {
R -= b;
}
}
*c = (unsigned int) R;
}
// division
void div ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned int temp_B[ MAX ];
unsigned int Q[ MAX ]; // quotient
unsigned int R[ MAX ]; // remainder
unsigned int zero [MAX];
unsigned int mask = 0x80000000;
bool aIsLessThenZero = false,
bIsLessThenZero = false;
for(int i=0; i< MAX;i++){
zero[i] = 0;
}
if (cmp (a,zero) < 0) {
sub (zero, a, temp_A);
aIsLessThenZero = true;
} else {
copyVariable ( a, temp_A );
}
if (cmp (b,zero) < 0) {
sub (zero, b, temp_B);
bIsLessThenZero = true;
} else {
copyVariable ( b, temp_B );
}
clearLongVariable ( Q );
clearLongVariable ( R );
//Integer division (unsigned) with remainder
//http://en.wikipedia.org/wiki/Division_algorithm
int j;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
j = i / INT_SIZE ;
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( temp_A );
if ( cmp( R, temp_B ) >= 0 ) {
sub ( R, temp_B, R );
Q[MAX - 1 - j] |= mask;
}
mask >>= 1;
if(mask == 0) mask = 0x80000000;
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
if (aIsLessThenZero ^ bIsLessThenZero) {
sub (zero, Q, Q);
}
copyVariable ( Q, c );
}
void div_ ( unsigned int *a,
unsigned int b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned long long temp_B;
unsigned int Q[ MAX ]; // quotient
unsigned long long R; // remainder
unsigned int zero [MAX];
bool aIsLessThenZero = false,
bIsLessThenZero = false;
for(int i=0; i< MAX;i++){
zero[i] = 0;
}
if (cmp (a,zero) < 0) {
sub (zero, a, temp_A);
aIsLessThenZero = true;
} else {
copyVariable ( a, temp_A );
}
if ( b < 0 ) { // note: b is unsigned, so this branch never executes; div_ expects a non-negative divisor
temp_B = b * (-1);
bIsLessThenZero = true;
} else {
temp_B = b;
}
clearLongVariable ( Q );
R = 0 ;
unsigned int mask = 0x80000000;
//Integer division (unsigned) with remainder
//http://en.wikipedia.org/wiki/Division_algorithm
int j;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
j = i / INT_SIZE ;
R <<= 1;
R |= shiftToLeftVariable ( temp_A );
if ( R >= temp_B ) {
R -= temp_B;
Q[MAX - 1 - j] |= mask;
}
mask >>= 1;
if(mask == 0) mask = 0x80000000;
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
if (aIsLessThenZero ^ bIsLessThenZero) {
sub (zero, Q, Q);
}
copyVariable ( Q, c );
}
void mul (unsigned int *firstFactor,
unsigned int *secondFactor,
unsigned int *product)
{
unsigned int tempFirstFactor[ MAX ];
unsigned int tempSecondFactor[ MAX ];
unsigned int tempProduct[ MAX ];
unsigned int zero [MAX];
bool firstFactorIsLessThenZero = false,
secondFactorIsLessThenZero = false;
for(int i=0; i < MAX;i++){
zero[i] = 0;
}
if (cmp (firstFactor,zero) < 0) {
sub (zero, firstFactor, tempFirstFactor);
firstFactorIsLessThenZero = true;
} else {
copyVariable ( firstFactor, tempFirstFactor );
}
if (cmp (secondFactor,zero) < 0) {
sub (zero, secondFactor, tempSecondFactor);
secondFactorIsLessThenZero = true;
} else {
copyVariable ( secondFactor, tempSecondFactor );
}
int CF;
clearLongVariable (product);
for (int i = 0; i < MAX * INT_SIZE; i++ ) {
CF = shiftToRightVariable ( tempSecondFactor );
if ( CF == 1 ) {
add ( tempFirstFactor, product, product);
}
shiftToLeftVariable ( tempFirstFactor );
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
copyVariable(product, tempProduct);
if (firstFactorIsLessThenZero ^ secondFactorIsLessThenZero) {
sub (zero, product, product);
}
}
// a * x = 1 mod b (when gcd(a,b) = 1)
// a * x + b * y = gcd(a,b)
void extended_euclid( unsigned int *a,
unsigned int *b,
unsigned int *x,
unsigned int *y,
unsigned int *d )
{
unsigned int x1[ MAX ];
unsigned int x2[ MAX ];
unsigned int y1[ MAX ];
unsigned int y2[ MAX ];
unsigned int q[ MAX ];
unsigned int r[ MAX ];
unsigned int zero[ MAX ];
unsigned int temp[ MAX ];
unsigned int temp_a[ MAX ];
unsigned int temp_b[ MAX ];
copyVariable( a, temp_a );
copyVariable( b, temp_b );
clearLongVariable ( x1 );
clearLongVariable ( x2 );
clearLongVariable ( y1 );
clearLongVariable ( y2 );
clearLongVariable ( q );
clearLongVariable ( r );
clearLongVariable ( zero );
clearLongVariable ( temp );
x2[ MAX-1 ] = 1; // x2 = 1
x1[ MAX-1 ] = 0; // x1 = 0
y2[ MAX-1 ] = 0; // y2 = 0
y1[ MAX-1 ] = 1; // y1 = 1
profiler_eeI_counter++;
profiler_extended_euclid_Iterations[profiler_eeI_counter] = 0;
while ( cmp( temp_b, zero ) > 0) {
profiler_extended_euclid_Iterations[profiler_eeI_counter]++;
div ( temp_a, temp_b, q); //q = a / b,
mul ( q, temp_b, temp );
sub ( temp_a, temp, r); //r = a - q * b;
clearLongVariable ( temp );
mul ( q, x1, temp );
sub ( x2, temp, x ); //*x = x2 - q * x1,
clearLongVariable ( temp );
mul( q, y1, temp );
sub( y2, temp, y ); //*y = y2 - q * y1;
copyVariable( temp_b, temp_a ); //a = b,
copyVariable( r, temp_b ); //b = r;
copyVariable( x1, x2 ); //x2 = x1,
copyVariable( x, x1 ); //x1 = *x, // t
copyVariable( y1, y2 ); //y2 = y1, //
copyVariable( y, y1 ); //y1 = *y;
}
copyVariable( temp_a, d ); //*d = a,
copyVariable( x2, x ); //*x = x2,
copyVariable( y2, y ); //*y = y2;
}
//input * result = 1 mod module
void InverseByModule( unsigned int *input,
unsigned int *module,
unsigned int *result )
{
unsigned int x[MAX];
unsigned int y[MAX];
unsigned int d[MAX];
unsigned int zero[MAX];
profiler_Inversions++;
clearLongVariable(x);
clearLongVariable(y);
clearLongVariable(d);
clearLongVariable(zero);
extended_euclid(input, module, x, y, d);
if (cmp ( x, zero ) < 0 ) {
add ( x, module, zero );
copyVariable( zero, result );
} else {
copyVariable( x, result );
}
//return x < 0 ? x + module : x;
}
void InverseByModule_( unsigned int *input,
unsigned int module,
unsigned int *result )
{
unsigned int x[MAX];
unsigned int y[MAX];
unsigned int d[MAX];
unsigned int temp[MAX];
unsigned int zero[MAX];
profiler_Inversions++;
clearLongVariable(x);
clearLongVariable(y);
clearLongVariable(d);
clearLongVariable(temp);
clearLongVariable(zero);
temp[ MAX - 1 ] = module;
extended_euclid(input, temp, x, y, d);
if (cmp ( x, zero ) < 0 ) {
add ( x, temp, zero );
*result = zero[MAX-1];
} else {
*result = x[MAX-1];
}
}
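// getNumberInRNSByModMe (below) reconstructs the integer from its RNS digits via the
// CRT sum of input[i] * MiInv[i] * Mi[i], reduces it mod M, and then keeps the six
// least-significant bits, i.e. the residue modulo the redundant channel Me = 2^6.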
void getNumberInRNSByModMe( unsigned int* input, unsigned long long* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempProduct[MAX];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi[i][j];
}
temp = input[i];
temp *= MiInv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
//X[i] % M % Me (Me = 2^6)
*result = 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 1 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 2 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 4 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 8 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 16 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 32 : 0;
}
void convertFromRNS( unsigned int* input, unsigned int* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempInput[THREAD_NUMBER];
unsigned int tempProduct[MAX];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi[i][j];
}
tempInput[i] = input[i];
temp = tempInput[i];
temp *= MiInv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
mod(R, $N, result);
}
void convertFromRNS_( unsigned int* input, unsigned int* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempProduct[MAX];
unsigned int tempInput[THREAD_NUMBER];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M_[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi_[i][j];
}
tempInput[i] = input[i];
temp = tempInput[i];
temp *= Mi_Inv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
mod(R, $N, result);
}
void printCurrentStateInformation(){
//system("cls");
if( currentOperationIndex == 0 ){
if( lastValueOfStateInformation != iterationsCounter * 100 / numberOfIterationsForE ){
lastValueOfStateInformation = iterationsCounter * 100 / numberOfIterationsForE;
printf("Encryption... %i %% done.\n", iterationsCounter * 100 / numberOfIterationsForE );
}
} else if( currentOperationIndex == 1 ){
if( lastValueOfStateInformation != iterationsCounter * 100 / numberOfIterationsForD ){
lastValueOfStateInformation = iterationsCounter * 100 / numberOfIterationsForD;
printf("Decryption... %i %% done.\n", iterationsCounter * 100 / numberOfIterationsForD );
}
}
iterationsCounter++ ;
}
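// MM (below) is one Montgomery modular multiplication carried out entirely in RNS:
// Q = -A*B*N^{-1} is formed channel-wise in the first base, extended to the second
// base and to the redundant modulus Me, R = (A*B + Q*N) * M^{-1} is then computed in
// the second base, and the final base extension (using the Beta correction derived
// from the Me channel) maps the result back into the first base. Each channel only
// needs 64-bit arithmetic, which is presumably why the data is laid out as
// THREAD_NUMBER independent residues.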
void MM( unsigned int* A,
unsigned int* A_,
unsigned int* B,
unsigned int* B_,
unsigned int* R,
unsigned int* R_ )
{
unsigned long long Q[THREAD_NUMBER];
unsigned long long Xi[THREAD_NUMBER];
unsigned long long Q_Me, R_Me, A_Me, B_Me, A_Me2, B_Me2 ;
unsigned long long Q_[THREAD_NUMBER];
unsigned long long s, s1, s2, currSum, r;
unsigned long long Sig[THREAD_NUMBER];
unsigned long long Beta, temp;
unsigned long long tempA[THREAD_NUMBER];
unsigned long long tempB[THREAD_NUMBER];
int rightBit;
clock_t begin;
begin = clock();
//computation of Q
for (int i=0; i<THREAD_NUMBER; i++){
Q[i] = ( Base[ i ] - A[ i ] ) % Base[i];
Q[i] *= (B[ i ] % Base[i]);
Q[i] %= Base[i];
Q[i] *= (InverseByModuleNM[i] % Base[i]);
Q[i] %= Base[i] ;
}
// First Base extension:
//****************************************************************************
for (int i=0; i<THREAD_NUMBER; i++){
Sig [i] = Q[i] % Base [i];
Sig [i] *= MiInv[i] % Base [i];
Sig [i] %= Base [i];
}
for (int i=0; i<THREAD_NUMBER; i++){
s=0;
for (int j=0; j<THREAD_NUMBER; j++){
s += ((MiInBase_[i][j] % Base_ [i]) * (Sig[j] % Base_ [i])) % Base_ [i];
}
Q_[i] = s % Base_ [i];
}
//Extra modulus computation;
// Only first thread:
s=0;
for (int i=0; i<THREAD_NUMBER; i++){
s += ((MiInMe[i] % Me) * ((Q[i] * MiInv[i]) % Base [i]) % Me ) % Me;
}
Q_Me = s % Me;
for (int i=0; i<THREAD_NUMBER; i++){
tempA[i] = A[i];
tempB[i] = B[i];
}
unsigned long long jjj = 1;
unsigned long long j;
unsigned int ttempA, ttempB;
for (int i=0; i<THREAD_NUMBER; i++){
InverseByModule_(Mi[i], 64, &ttempA);
jjj *= ttempA;
}
A_Me2 = jjj % Me;
getNumberInRNSByModMe(A, &A_Me);
getNumberInRNSByModMe(B, &B_Me);
//printf("\n[%4.2f] Q_Me, A_Me, B_Me\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
// Computing R in Base_:
for (int i=0; i<THREAD_NUMBER; i++){
temp = ( (Q_[i] * N_[i]) % Base_[i] + ((unsigned long long)A_[i] * (unsigned long long)B_[i]) % Base_[i] ) % Base_[i] ;
temp *= InverseByModuleMM_[i] % Base_[i];
temp %= Base_[i];
R_[i] = (unsigned int)temp;
}
R_Me = (((A_Me * B_Me) + (Q_Me * N_Me) ) * InverseByModuleMMMe ) % Me ;
//printf("\n[%4.2f] R_, R_Me\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//Second base extension:
//*********************************************************************************************
for (int i=0; i<THREAD_NUMBER; i++){
Xi [i] = R_[i] % Base_ [i];
Xi [i] *= Mi_Inv[i] % Base_ [i];
Xi [i] %= Base_ [i];
}
//Computing beta:
s=0;
for (int j=0; j<THREAD_NUMBER; j++){
s += Xi [j] * Mi_InMe[j];
}
Beta = ( ((s - R_Me) % Me) * InverseByModuleM_Me) % Me;
//printf("\n[%4.2f] Beta\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
for (int i=0; i<THREAD_NUMBER; i++){
s1=0;
s2=0;
currSum=0; //[s1, s2] 128 bit max;
for (int j=0; j<THREAD_NUMBER; j++){
s2 += Mi_InBase[i][j] * Xi[j];
//overflow
if(s2 < currSum ) {
s1++;
}
currSum = s2;
}
if ( s1==0 ){
R[i] = ( s2 - (( Beta % Base[i] ) * M_InBase[i] ) % Base[i] ) % Base[i];
} else {
r = 0;
s2 -= (( Beta % Base[i] ) * M_InBase[i] ) % Base[i] ;
for (int j = 4 * INT_SIZE - 1; j > 0 ; j-- ) {
rightBit = ( s2 & ( 0x8000000000000000 ) ) == ( 0x8000000000000000 ) ? 1 : 0;
r <<= 1;
s2 <<=1;
s1 <<= 1;
s1 &= 0xFFFFFFFFFFFFFFFE;
s1 |= rightBit;
r |= ( s1 & ( 0x8000000000000000 ) ) == ( 0x8000000000000000 ) ? 1 : 0;
if ( r > Base[i] ) {
r -= Base[i];
}
}
R[i] = r;
}
}
//printf("\n[%4.2f] R\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//printCurrentStateInformation();
//(A * B * M^-1 mod N + Beta * N) mod M
}
void showProfilerInfo(){
printf("\nInversions: %i", profiler_Inversions);
}
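// generateStaticData (below) precomputes, once for a given modulus $N and base set,
// everything MM() needs: M and M_, M^2 mod N in both RNS bases, the per-channel
// inverses MiInv / Mi_Inv, the residues of N and the inverses of N and M used by the
// Montgomery step, and the cross-base residues (Mi mod Base_, Mi_ mod Base, Mi mod Me,
// ...) consumed by the two base extensions.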
void generateStaticData(){
clock_t begin;
unsigned int temp[ MAX ];
unsigned int $SQR_M_MOD_N[ MAX ];
unsigned int $InverseByModuleMM_[ MAX ];
unsigned int $InverseByModule$NM[ MAX ];
begin = clock();
// M = Base[0] * Base[1] * ... * Base[n]
// M_ = Base_[0] * Base_[1] * ... * Base_[n]
convertToNormalForm( "1" , M);
convertToNormalForm( "1" , M_);
for(int i=0; i < THREAD_NUMBER; i++){
clearLongVariable( temp );
temp[ MAX - 1 ] = Base[i];
mul ( M, temp, M );
temp[ MAX - 1 ] = Base_[i];
mul ( M_, temp, M_ );
}
printf("\n[%4.2f] M and M_ done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
clearLongVariable( $SQR_M_MOD_N );
clearLongVariable( $InverseByModuleMM_ );
clearLongVariable( $InverseByModule$NM );
clearLongVariable( temp );
// $SQR_M_MOD_N = M*M % $N;
//mul ( M, M, temp );
//mod (temp, $N, $SQR_M_MOD_N);
//sqrMmodN(temp, $N, $SQR_M_MOD_N);
mod (M, $N, temp);
mul (temp, temp, $SQR_M_MOD_N);
copyVariable($SQR_M_MOD_N,temp);
mod (temp,$N,$SQR_M_MOD_N);
printf("\n[%4.2f] $SQR_M_MOD_N done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
InverseByModule(M, M_, $InverseByModuleMM_);
printf("\n[%4.2f] $InverseByModuleMM_ done [%i iterations].\n",(double)(clock() - begin) / CLOCKS_PER_SEC, profiler_extended_euclid_Iterations[0]);
InverseByModule ($N, M , $InverseByModule$NM);
printf("\n[%4.2f] $InverseByModule$NM done [%i iterations].\n",(double)(clock() - begin) / CLOCKS_PER_SEC, profiler_extended_euclid_Iterations[1]);
for( int i = 0; i < THREAD_NUMBER; i++){
ABmodN[0][i] = 1;
ABmodN[1][i] = 1;
mod_($N, Base[i], &N[i]);
mod_($N, Base_[i], &N_[i]);
div_(M, Base[i], Mi[i]);
div_(M_, Base_[i], Mi_[i]);
InverseByModule_(Mi[i], Base[i], &MiInv[i]);
InverseByModule_(Mi_[i], Base_[i], &Mi_Inv[i]);
mod_($SQR_M_MOD_N, Base[i], &SQR_M_MOD_N_RNS[i]);
mod_($SQR_M_MOD_N, Base_[i], &SQR_M_MOD_N_RNS_[i]);
mod_($InverseByModuleMM_, Base_[i], &InverseByModuleMM_[i]);
mod_($InverseByModule$NM, Base[i], &InverseByModuleNM[i]);
mod_(Mi_[i], Me, &Mi_InMe[i]);
mod_(Mi[i], Me, &MiInMe[i]);
mod_(M_, Base[i], &M_InBase[i]);
printf("\n[%4.2f] #%i thread computation done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC, i);
}
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < THREAD_NUMBER; j++){
mod_( Mi [j], Base_ [i], &MiInBase_[i][j] );
mod_( Mi_ [j], Base [i], &Mi_InBase[i][j] );
}
}
printf("\n[%4.2f] &MiInBase_ and &Mi_InBase done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
mod_($N, Me, &N_Me);
clearLongVariable( temp );
//(M_^-1 mod Me) % Me;
unsigned int t;
InverseByModule_(M_, Me, &t);
temp[ MAX - 1] = t;
mod_(temp, Me, &InverseByModuleM_Me);
printf("\n[%4.2f] &InverseByModuleM_Me done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//(( M_ * Me ) ^ -1 mod Me) % Me;
unsigned int tempM_Me[ MAX ];
clearLongVariable( tempM_Me );
clearLongVariable( temp );
temp[MAX - 1] = Me;
mul(M_, temp, tempM_Me);
clearLongVariable( temp );
InverseByModule(M, tempM_Me, temp);
mod_(temp, Me, &InverseByModuleMMMe);
printf("\n[%4.2f] &InverseByModuleMMMe done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
printf("\nTOTAL TIME SPENT: %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
}
int main(int argc, char **argv)
{
//argv[1] - input message
//argv[2] - action message
//argv[3] - method message
int mode; //encr/decr
int unit; //CPU/GPU
unsigned int $d[MAX];
unsigned int $e[MAX];
unsigned int q[MAX];
unsigned int p[MAX];
//unsigned int n[MAX];
unsigned int p_minus_1[MAX];
unsigned int q_minus_1[MAX];
unsigned int phi[MAX];
unsigned int temp[ MAX ];
unsigned int $input[ MAX ];
unsigned int R[THREAD_NUMBER], R_[THREAD_NUMBER];
clock_t begin;
hipError_t cudaStatus;
char output [ MAX * 10 ];
mode = 2;
unit = 2;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
/*// n = p*q
mul ( p, q, n );
clearLongVariable ( _1 );
_1[ MAX - 1 ] = 1; // for storing digit "1"
clearLongVariable ( p_minus_1 );
clearLongVariable ( q_minus_1 );
// phi = (p-1)*(q-1);
sub ( p, _1, p_minus_1 );
sub ( q, _1, q_minus_1 );
mul ( p_minus_1, q_minus_1, phi );
*/
Me = 64;
/*
convertToNormalForm( "130ebebd67b16a9ab2c53a437badbf8f01a80c750095a7fcfe95742c3d5ed1abb318babc5cb5d9350fee4da65ee074f65e1758117e6945f0fcfc8137528053ce9d1da8618890dee24e5e0bf8c87795bb1d09eddd544640824ee0dd0ea9fd908d27b0f8a1ae5c37f3647fbf2f5795500ad76c195b3387d0458a8f51b701472301" , $N); // modulus
convertToNormalForm( "0123" , $input); // message
convertToNormalForm( "010001" , $e); // public exponenta
convertToNormalForm( "12e8da920d4599458e84ec5ef1656161807f427d05eb79182b7418259d6f6c14364d1f5caf9130c8d9d9d6ea71d1bdbc87781a46a16bcb9e672814fed3b9c96ddffe0a1b0955ae68055c8f92fef518a04fc32a2ea8390e617cc5556a251f9ae9eee70a32e579cb3e9f298848a9b3aaf634f5930ffbf74473f7cb6c0cefee1751" , $d); // secret exponenta
*/
convertToNormalForm( "025123" , $N); // modulus
convertToNormalForm( "01365D" , $e); // public exponenta
convertToNormalForm( "0AD" , $d) ; // secret exponenta
convertPowerToArrayOfBits($e, e, &eBitsCount, &numberOfIterationsForE);
convertPowerToArrayOfBits($d, d, &dBitsCount, &numberOfIterationsForD);
for (int i = 0; i < MAX; i++) {
ZERO_MEMORY[ i ] = 0;
}
for(int i=0; i<THREAD_NUMBER;i++){
Base[i] = 4294967296 - HHH[i];
Base_[i] = 4294967296 - KKK[i];
}
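// The RNS moduli are of the form 2^32 - c with small odd c taken from HHH and KKK;
// the two bases must be pairwise coprime (and coprime to Me = 64) for the CRT
// reconstruction and base extensions to hold, which the chosen constants are
// presumably meant to guarantee.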
generateStaticData();
convertToNormalForm( argv[1] , $input); // message
for(int z=0; z< THREAD_NUMBER;z++) {
mod_($input, Base[z], &input[z]);
mod_($input, Base_[z], &input_[z]);
}
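// Both branches below run a left-to-right square-and-multiply over the exponent bits.
// Each squaring step is two MM() calls: the first yields X*X*M^{-1} mod N, the second
// multiplies by M^2 mod N to cancel the extra M^{-1}, so the accumulator stays an
// ordinary residue mod N. A set exponent bit adds the same two-call pattern with the
// RNS-converted input.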
if( mode == 1 ){
//encrypt
iterationsCounter = 1;
currentOperationIndex = 0;
begin = clock();
for (int z = MAX * INT_SIZE - eBitsCount; z < MAX * INT_SIZE; z++){
MM(ABmodN[0], ABmodN[1], ABmodN[0], ABmodN[1], R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
if (e[z]){
MM(ABmodN[0], ABmodN[1], input, input_, R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
}
}
if(unit == 2)
printf("\nEncryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC / 15);
else
printf("\nEncryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
};
if( mode == 2 ){
//decrypt
iterationsCounter = 1;
currentOperationIndex = 1;
begin = clock();
for (int z = MAX * INT_SIZE - dBitsCount; z < MAX * INT_SIZE; z++){
MM(ABmodN[0], ABmodN[1], ABmodN[0], ABmodN[1], R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
if (d[z]){
MM(ABmodN[0], ABmodN[1], input, input_, R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
}
}
if(unit == 2)
printf("\nDecryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC / 15);
else
printf("\nDecryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
}
convertFromRNS(ABmodN[0], temp);
convertFromNormalForm( output, temp );
printf("%s\n", output);
showProfilerInfo();
/*
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
for (int i=0; i< 100000; i++) {
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
/* cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
*/
Error:
getch();
return 0;
}
| 9f674e4a4fd6e7b5b8acb694d891109d8c3226f3.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>
#include <conio.h>
#include "shifts.h"
#include "Constants.h"
unsigned int
ZERO_MEMORY[MAX],
KKK[33] = { 3, 5, 17, 23, 27, 29, 39, 47, 57, 65, 71, 75, 77, 83, 93, 99, 105, 107, 113, 117, 129, 135, 143, 149, 153, 159, 167, 173, 185, 189, 195, 199, 203 },
HHH[33] = { 285, 297, 299, 303, 309, 315, 323, 327, 329, 339, 353, 359, 363, 365, 369, 383, 387, 395, 413, 419, 429, 437, 453, 465, 467, 479, 483, 485, 489, 497, 507, 509, 513 },
Base[THREAD_NUMBER],
Base_[THREAD_NUMBER],
N_Me,
Me,
InverseByModuleM_Me,
InverseByModuleMMMe,
input[THREAD_NUMBER],
input_[THREAD_NUMBER],
N[THREAD_NUMBER] ,
N_[THREAD_NUMBER] ,
Mi[THREAD_NUMBER][MAX], // Large
Mi_[THREAD_NUMBER][MAX], // Large
M[ MAX ], //
M_[ MAX ], //
$N[MAX],
MiInv[THREAD_NUMBER] ,
Mi_Inv[THREAD_NUMBER] ,
SQR_M_MOD_N_RNS[THREAD_NUMBER],
SQR_M_MOD_N_RNS_[THREAD_NUMBER] ,
InverseByModuleMM_[THREAD_NUMBER] ,
InverseByModuleNM[THREAD_NUMBER] ,
Mi_InMe[THREAD_NUMBER] ,
MiInMe [THREAD_NUMBER],
M_InBase[THREAD_NUMBER],
MiInBase_[THREAD_NUMBER][THREAD_NUMBER],
Mi_InBase[THREAD_NUMBER][THREAD_NUMBER],
ABmodN[2][THREAD_NUMBER];
bool power[ MAX * INT_SIZE ];
bool e[ MAX * INT_SIZE ];
bool d[ MAX * INT_SIZE ];
int iterationsCounter,
lastValueOfStateInformation,
currentOperationIndex,
eBitsCount,
numberOfIterationsForE,
dBitsCount,
numberOfIterationsForD;
int profiler_Inversions = 0,
profiler_extended_euclid_Iterations[1000000],
profiler_eeI_counter = -1;
void clearLongVariable ( unsigned int *a ) {
memcpy( a, ZERO_MEMORY, MAX * sizeof(int) );
}
void copyVariable ( unsigned int *source, unsigned int *dest ) {
memcpy( dest, source, MAX * sizeof(int) );
}
void convertToNormalForm (char input[], unsigned int *a){
clearLongVariable (a);
int temp = 0;
for( int i = MAX - 1,
j = (int)strlen(input) - 1,
p = 0;
j >= 0; j-- ) {
if ( input[j] == 32 ) { continue; } // spaces are allowed for convenience
else if ( toupper(input[j]) == 65 ) { temp |= 0xA << p; }
else if ( toupper(input[j]) == 66 ) { temp |= 0xB << p; }
else if ( toupper(input[j]) == 67 ) { temp |= 0xC << p; }
else if ( toupper(input[j]) == 68 ) { temp |= 0xD << p; }
else if ( toupper(input[j]) == 69 ) { temp |= 0xE << p; }
else if ( toupper(input[j]) == 70 ) { temp |= 0xF << p; }
else { temp |= ( toupper(input[j]) - 48 ) << p; }
p+=4;
if( p >= 32 || j == 0){ p = 0; a[i] = temp; temp = 0; i--; }
}
}
void convertFromNormalForm (char output[], unsigned int *a) {
int temp;
int j = ( MAX * 8 ) - 1;
output [ j + 1 ] = '\0';
for( int i = MAX - 1; i >= 0; i-- ) {
int mask = 0xF;
for ( int p = 0; p < 8; p++) {
temp = a[ i ] ;
temp >>= 4*p;
temp &= mask ;
output [ j ] = ( temp > 9 ) ? ( temp + 55 ) : ( temp + 48 );
j--;
}
}
}
void convertPowerToArrayOfBits( unsigned int* $power, bool *power, int *bin_digits_in_power, int *totalIterationsNeeded ){
int c = 0;
for(int i = MAX * INT_SIZE - 1, bit; i > 0 ; i--){
bit = shiftToRightVariable($power);
if (bit == 1){
*bin_digits_in_power = MAX * INT_SIZE - i;
c++;
}
power[i] = (bit == 1) ? true : false;
}
c *= 2;
c += 2 * (*bin_digits_in_power);
*totalIterationsNeeded = c;
}
void add ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
int CF = 0;
for (int i = MAX - 1; i >= 0; i--) {
// carry-safe: do the limb addition in 64 bits so the a[ i ] == b[ i ] == 0xFFFFFFFF,
// CF == 1 case cannot drop the carry
unsigned long long s = (unsigned long long)a[ i ] + b[ i ] + CF;
c[ i ] = (unsigned int)s;
CF = (int)(s >> 32);
}
}
void sub ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int tempResult [MAX];
clearLongVariable ( tempResult );
int CF = 0;
for (int i = MAX - 1; i >= 0; i--) {
tempResult[i] = a[i] - b[i] - CF;
// borrow-safe: compare against b[i] + CF in 64 bits, so b[i] == 0xFFFFFFFF needs no special case
CF = ( (unsigned long long)a[i] < (unsigned long long)b[i] + CF ) ? 1 : 0;
}
copyVariable ( tempResult, c );
}
//
// 1 a > b
// 0 a = b
// -1 a < b
// signed comparison of numbers
int cmp (unsigned int *a, unsigned int *b)
{
bool a_positive = !(a[0] & 0x80000000);
bool b_positive = !(b[0] & 0x80000000);
// 1. + +
// 1. + -
// 1. - +
// 1. - -
if ( a_positive && b_positive ){
for (int i = 0; i < MAX; i++) {
if ( a[ i ] > b[ i ] ) {
return 1;
}
if ( a[ i ] < b[ i ] ) {
return -1;
}
}
return 0;
} else if(a_positive && !b_positive) {
return 1;
} else if(!a_positive && b_positive) {
return -1;
} else { // two numbers are negative:
for (int i = 0; i < MAX; i++) {
if ( a[ i ] > b[ i ] ) {
return 1;
}
if ( a[ i ] < b[ i ] ) {
return -1;
}
}
return 0;
}
}
void mod ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned int R[ MAX ]; // remainder
copyVariable ( a, temp_A );
if ( cmp ( temp_A, b) <= 0) {
copyVariable ( temp_A, c );
return;
}
clearLongVariable ( R ); // initialize remainder to zero
// Integer division (unsigned) with remainder
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R ); // left-shift R by 1 bit
R[ MAX - 1] |= shiftToLeftVariable ( temp_A ); // set the least-significant bit of R equal to bit i of the numerator
if ( cmp( R, b ) >= 0 ) {
sub ( R, b, R );
}
}
copyVariable ( R, c );
}
void mod_ ( unsigned int *a,
unsigned int b,
unsigned int* c)
{
unsigned int temp_A[ MAX ];
unsigned long long R; // remainder
copyVariable ( a, temp_A );
R = 0;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
R <<= 1;
R |= shiftToLeftVariable ( temp_A );
if ( R >= b ) {
R -= b;
}
}
*c = (unsigned int) R;
}
// division
void div ( unsigned int *a,
unsigned int *b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned int temp_B[ MAX ];
unsigned int Q[ MAX ]; // quotient
unsigned int R[ MAX ]; // remainder
unsigned int zero [MAX];
unsigned int mask = 0x80000000;
bool aIsLessThenZero = false,
bIsLessThenZero = false;
for(int i=0; i< MAX;i++){
zero[i] = 0;
}
if (cmp (a,zero) < 0) {
sub (zero, a, temp_A);
aIsLessThenZero = true;
} else {
copyVariable ( a, temp_A );
}
if (cmp (b,zero) < 0) {
sub (zero, b, temp_B);
bIsLessThenZero = true;
} else {
copyVariable ( b, temp_B );
}
clearLongVariable ( Q );
clearLongVariable ( R );
//Integer division (unsigned) with remainder
//http://en.wikipedia.org/wiki/Division_algorithm
int j;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
j = i / INT_SIZE ;
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( temp_A );
if ( cmp( R, temp_B ) >= 0 ) {
sub ( R, temp_B, R );
Q[MAX - 1 - j] |= mask;
}
mask >>= 1;
if(mask == 0) mask = 0x80000000;
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
if (aIsLessThenZero ^ bIsLessThenZero) {
sub (zero, Q, Q);
}
copyVariable ( Q, c );
}
void div_ ( unsigned int *a,
unsigned int b,
unsigned int *c)
{
unsigned int temp_A[ MAX ];
unsigned long long temp_B;
unsigned int Q[ MAX ]; // quotient
unsigned long long R; // remainder
unsigned int zero [MAX];
bool aIsLessThenZero = false,
bIsLessThenZero = false;
for(int i=0; i< MAX;i++){
zero[i] = 0;
}
if (cmp (a,zero) < 0) {
sub (zero, a, temp_A);
aIsLessThenZero = true;
} else {
copyVariable ( a, temp_A );
}
if ( b < 0 ) { // note: b is unsigned, so this branch never executes; div_ expects a non-negative divisor
temp_B = b * (-1);
bIsLessThenZero = true;
} else {
temp_B = b;
}
clearLongVariable ( Q );
R = 0 ;
unsigned int mask = 0x80000000;
//Integer division (unsigned) with remainder
//http://en.wikipedia.org/wiki/Division_algorithm
int j;
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
j = i / INT_SIZE ;
R <<= 1;
R |= shiftToLeftVariable ( temp_A );
if ( R >= temp_B ) {
R -= temp_B;
Q[MAX - 1 - j] |= mask;
}
mask >>= 1;
if(mask == 0) mask = 0x80000000;
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
if (aIsLessThenZero ^ bIsLessThenZero) {
sub (zero, Q, Q);
}
copyVariable ( Q, c );
}
void mul (unsigned int *firstFactor,
unsigned int *secondFactor,
unsigned int *product)
{
unsigned int tempFirstFactor[ MAX ];
unsigned int tempSecondFactor[ MAX ];
unsigned int tempProduct[ MAX ];
unsigned int zero [MAX];
bool firstFactorIsLessThenZero = false,
secondFactorIsLessThenZero = false;
for(int i=0; i < MAX;i++){
zero[i] = 0;
}
if (cmp (firstFactor,zero) < 0) {
sub (zero, firstFactor, tempFirstFactor);
firstFactorIsLessThenZero = true;
} else {
copyVariable ( firstFactor, tempFirstFactor );
}
if (cmp (secondFactor,zero) < 0) {
sub (zero, secondFactor, tempSecondFactor);
secondFactorIsLessThenZero = true;
} else {
copyVariable ( secondFactor, tempSecondFactor );
}
int CF;
clearLongVariable (product);
for (int i = 0; i < MAX * INT_SIZE; i++ ) {
CF = shiftToRightVariable ( tempSecondFactor );
if ( CF == 1 ) {
add ( tempFirstFactor, product, product);
}
shiftToLeftVariable ( tempFirstFactor );
}
// - / - = +
// + / - = -
// - / + = -
// + / + = +
copyVariable(product, tempProduct);
if (firstFactorIsLessThenZero ^ secondFactorIsLessThenZero) {
sub (zero, product, product);
}
}
// a * x = 1 mod b (when gcd(a,b) = 1)
// a * x + b * y = gcd(a,b)
void extended_euclid( unsigned int *a,
unsigned int *b,
unsigned int *x,
unsigned int *y,
unsigned int *d )
{
unsigned int x1[ MAX ];
unsigned int x2[ MAX ];
unsigned int y1[ MAX ];
unsigned int y2[ MAX ];
unsigned int q[ MAX ];
unsigned int r[ MAX ];
unsigned int zero[ MAX ];
unsigned int temp[ MAX ];
unsigned int temp_a[ MAX ];
unsigned int temp_b[ MAX ];
copyVariable( a, temp_a );
copyVariable( b, temp_b );
clearLongVariable ( x1 );
clearLongVariable ( x2 );
clearLongVariable ( y1 );
clearLongVariable ( y2 );
clearLongVariable ( q );
clearLongVariable ( r );
clearLongVariable ( zero );
clearLongVariable ( temp );
x2[ MAX-1 ] = 1; // x2 = 1
x1[ MAX-1 ] = 0; // x1 = 0
y2[ MAX-1 ] = 0; // y2 = 0
y1[ MAX-1 ] = 1; // y1 = 1
profiler_eeI_counter++;
profiler_extended_euclid_Iterations[profiler_eeI_counter] = 0;
while ( cmp( temp_b, zero ) > 0) {
profiler_extended_euclid_Iterations[profiler_eeI_counter]++;
div ( temp_a, temp_b, q); //q = a / b,
mul ( q, temp_b, temp );
sub ( temp_a, temp, r); //r = a - q * b;
clearLongVariable ( temp );
mul ( q, x1, temp );
sub ( x2, temp, x ); //*x = x2 - q * x1,
clearLongVariable ( temp );
mul( q, y1, temp );
sub( y2, temp, y ); //*y = y2 - q * y1;
copyVariable( temp_b, temp_a ); //a = b,
copyVariable( r, temp_b ); //b = r;
copyVariable( x1, x2 ); //x2 = x1,
copyVariable( x, x1 ); //x1 = *x, // t
copyVariable( y1, y2 ); //y2 = y1, //
copyVariable( y, y1 ); //y1 = *y;
}
copyVariable( temp_a, d ); //*d = a,
copyVariable( x2, x ); //*x = x2,
copyVariable( y2, y ); //*y = y2;
}
//input * result = 1 mod module
void InverseByModule( unsigned int *input,
unsigned int *module,
unsigned int *result )
{
unsigned int x[MAX];
unsigned int y[MAX];
unsigned int d[MAX];
unsigned int zero[MAX];
profiler_Inversions++;
clearLongVariable(x);
clearLongVariable(y);
clearLongVariable(d);
clearLongVariable(zero);
extended_euclid(input, module, x, y, d);
if (cmp ( x, zero ) < 0 ) {
add ( x, module, zero );
copyVariable( zero, result );
} else {
copyVariable( x, result );
}
//return x < 0 ? x + module : x;
}
void InverseByModule_( unsigned int *input,
unsigned int module,
unsigned int *result )
{
unsigned int x[MAX];
unsigned int y[MAX];
unsigned int d[MAX];
unsigned int temp[MAX];
unsigned int zero[MAX];
profiler_Inversions++;
clearLongVariable(x);
clearLongVariable(y);
clearLongVariable(d);
clearLongVariable(temp);
clearLongVariable(zero);
temp[ MAX - 1 ] = module;
extended_euclid(input, temp, x, y, d);
if (cmp ( x, zero ) < 0 ) {
add ( x, temp, zero );
*result = zero[MAX-1];
} else {
*result = x[MAX-1];
}
}
void getNumberInRNSByModMe( unsigned int* input, unsigned long long* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempProduct[MAX];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi[i][j];
}
temp = input[i];
temp *= MiInv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
//X[i] % M % Me (Me = 2^6)
*result = 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 1 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 2 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 4 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 8 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 16 : 0;
*result += ( shiftToRightVariable ( R )) == 1 ? 32 : 0;
}
void convertFromRNS( unsigned int* input, unsigned int* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempInput[THREAD_NUMBER];
unsigned int tempProduct[MAX];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi[i][j];
}
tempInput[i] = input[i];
temp = tempInput[i];
temp *= MiInv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
mod(R, $N, result);
}
void convertFromRNS_( unsigned int* input, unsigned int* result){
unsigned int X[MAX];
unsigned int R[MAX];
unsigned int tempProduct[MAX];
unsigned int tempInput[THREAD_NUMBER];
unsigned int tempM[MAX];
unsigned int tempFirstFactor[MAX];
unsigned long long temp;
for (int i = 0; i < MAX; i++){
tempM[ i ] = M_[ i ];
X[ i ] = 0;
R[ i ] = 0;
}
// X[i] = INPUT[i] * MiInv[i] * Mi[i]
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < MAX; j++){
tempProduct[j] = 0;
tempFirstFactor [j] = Mi_[i][j];
}
tempInput[i] = input[i];
temp = tempInput[i];
temp *= Mi_Inv[i];
while( temp !=0 ){
if ( shr_long (temp, 0) == 1 ) {
add ( tempFirstFactor, tempProduct, tempProduct);
}
shiftToLeftVariable ( tempFirstFactor );
}
add ( X, tempProduct, X);
}
//X[i] % M
for (int i = MAX * INT_SIZE - 1; i >= 0 ; i-- ) {
shiftToLeftVariable ( R );
R[ MAX - 1] |= shiftToLeftVariable ( X );
if ( cmp( R, tempM ) >= 0 ) {
sub ( R, tempM, R );
}
}
mod(R, $N, result);
}
void printCurrentStateInformation(){
//system("cls");
if( currentOperationIndex == 0 ){
if( lastValueOfStateInformation != iterationsCounter * 100 / numberOfIterationsForE ){
lastValueOfStateInformation = iterationsCounter * 100 / numberOfIterationsForE;
printf("Encryption... %i %% done.\n", iterationsCounter * 100 / numberOfIterationsForE );
}
} else if( currentOperationIndex == 1 ){
if( lastValueOfStateInformation != iterationsCounter * 100 / numberOfIterationsForD ){
lastValueOfStateInformation = iterationsCounter * 100 / numberOfIterationsForD;
printf("Decryption... %i %% done.\n", iterationsCounter * 100 / numberOfIterationsForD );
}
}
iterationsCounter++ ;
}
void MM( unsigned int* A,
unsigned int* A_,
unsigned int* B,
unsigned int* B_,
unsigned int* R,
unsigned int* R_ )
{
unsigned long long Q[THREAD_NUMBER];
unsigned long long Xi[THREAD_NUMBER];
unsigned long long Q_Me, R_Me, A_Me, B_Me, A_Me2, B_Me2 ;
unsigned long long Q_[THREAD_NUMBER];
unsigned long long s, s1, s2, currSum, r;
unsigned long long Sig[THREAD_NUMBER];
unsigned long long Beta, temp;
unsigned long long tempA[THREAD_NUMBER];
unsigned long long tempB[THREAD_NUMBER];
int rightBit;
clock_t begin;
begin = clock();
//computation of Q
for (int i=0; i<THREAD_NUMBER; i++){
Q[i] = ( Base[ i ] - A[ i ] ) % Base[i];
Q[i] *= (B[ i ] % Base[i]);
Q[i] %= Base[i];
Q[i] *= (InverseByModuleNM[i] % Base[i]);
Q[i] %= Base[i] ;
}
// First Base extension:
//****************************************************************************
for (int i=0; i<THREAD_NUMBER; i++){
Sig [i] = Q[i] % Base [i];
Sig [i] *= MiInv[i] % Base [i];
Sig [i] %= Base [i];
}
for (int i=0; i<THREAD_NUMBER; i++){
s=0;
for (int j=0; j<THREAD_NUMBER; j++){
s += ((MiInBase_[i][j] % Base_ [i]) * (Sig[j] % Base_ [i])) % Base_ [i];
}
Q_[i] = s % Base_ [i];
}
//Extra modulus computation;
// Only first thread:
s=0;
for (int i=0; i<THREAD_NUMBER; i++){
s += ((MiInMe[i] % Me) * ((Q[i] * MiInv[i]) % Base [i]) % Me ) % Me;
}
Q_Me = s % Me;
for (int i=0; i<THREAD_NUMBER; i++){
tempA[i] = A[i];
tempB[i] = B[i];
}
unsigned long long jjj = 1;
unsigned long long j;
unsigned int ttempA, ttempB;
for (int i=0; i<THREAD_NUMBER; i++){
InverseByModule_(Mi[i], 64, &ttempA);
jjj *= ttempA;
}
A_Me2 = jjj % Me;
getNumberInRNSByModMe(A, &A_Me);
getNumberInRNSByModMe(B, &B_Me);
//printf("\n[%4.2f] Q_Me, A_Me, B_Me\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
// Computing R in Base_:
for (int i=0; i<THREAD_NUMBER; i++){
temp = ( (Q_[i] * N_[i]) % Base_[i] + ((unsigned long long)A_[i] * (unsigned long long)B_[i]) % Base_[i] ) % Base_[i] ;
temp *= InverseByModuleMM_[i] % Base_[i];
temp %= Base_[i];
R_[i] = (unsigned int)temp;
}
R_Me = (((A_Me * B_Me) + (Q_Me * N_Me) ) * InverseByModuleMMMe ) % Me ;
//printf("\n[%4.2f] R_, R_Me\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//Second base extension:
//*********************************************************************************************
for (int i=0; i<THREAD_NUMBER; i++){
Xi [i] = R_[i] % Base_ [i];
Xi [i] *= Mi_Inv[i] % Base_ [i];
Xi [i] %= Base_ [i];
}
//Computing beta:
s=0;
for (int j=0; j<THREAD_NUMBER; j++){
s += Xi [j] * Mi_InMe[j];
}
Beta = ( ((s - R_Me) % Me) * InverseByModuleM_Me) % Me;
//printf("\n[%4.2f] Beta\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
for (int i=0; i<THREAD_NUMBER; i++){
s1=0;
s2=0;
currSum=0; //[s1, s2] 128 bit max;
for (int j=0; j<THREAD_NUMBER; j++){
s2 += Mi_InBase[i][j] * Xi[j];
//overflow
if(s2 < currSum ) {
s1++;
}
currSum = s2;
}
if ( s1==0 ){
R[i] = ( s2 - (( Beta % Base[i] ) * M_InBase[i] ) % Base[i] ) % Base[i];
} else {
r = 0;
s2 -= (( Beta % Base[i] ) * M_InBase[i] ) % Base[i] ;
for (int j = 4 * INT_SIZE - 1; j > 0 ; j-- ) {
rightBit = ( s2 & ( 0x8000000000000000 ) ) == ( 0x8000000000000000 ) ? 1 : 0;
r <<= 1;
s2 <<=1;
s1 <<= 1;
s1 &= 0xFFFFFFFFFFFFFFFE;
s1 |= rightBit;
r |= ( s1 & ( 0x8000000000000000 ) ) == ( 0x8000000000000000 ) ? 1 : 0;
if ( r > Base[i] ) {
r -= Base[i];
}
}
R[i] = r;
}
}
//printf("\n[%4.2f] R\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//printCurrentStateInformation();
//(A * B * M^-1 mod N + Beta * N) mod M
}
void showProfilerInfo(){
printf("\nInversions: %i", profiler_Inversions);
}
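// Precomputes every constant the RNS arithmetic needs: the dynamic ranges M and M_, M^2 mod N,
// and the per-modulus values Mi, Mi^-1 and N mod Base[i], together with the inverses modulo
// M, M_ and Me that MM() and the base extensions rely on.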
void generateStaticData(){
clock_t begin;
unsigned int temp[ MAX ];
unsigned int $SQR_M_MOD_N[ MAX ];
unsigned int $InverseByModuleMM_[ MAX ];
unsigned int $InverseByModule$NM[ MAX ];
begin = clock();
// M = Base[0] * Base[1] * ... * Base[n]
// M_ = Base_[0] * Base_[1] * ... * Base_[n]
convertToNormalForm( "1" , M);
convertToNormalForm( "1" , M_);
for(int i=0; i < THREAD_NUMBER; i++){
clearLongVariable( temp );
temp[ MAX - 1 ] = Base[i];
mul ( M, temp, M );
temp[ MAX - 1 ] = Base_[i];
mul ( M_, temp, M_ );
}
printf("\n[%4.2f] M and M_ done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
clearLongVariable( $SQR_M_MOD_N );
clearLongVariable( $InverseByModuleMM_ );
clearLongVariable( $InverseByModule$NM );
clearLongVariable( temp );
// $SQR_M_MOD_N = M*M % $N;
//mul ( M, M, temp );
//mod (temp, $N, $SQR_M_MOD_N);
//sqrMmodN(temp, $N, $SQR_M_MOD_N);
mod (M, $N, temp);
mul (temp, temp, $SQR_M_MOD_N);
copyVariable($SQR_M_MOD_N,temp);
mod (temp,$N,$SQR_M_MOD_N);
printf("\n[%4.2f] $SQR_M_MOD_N done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
InverseByModule(M, M_, $InverseByModuleMM_);
printf("\n[%4.2f] $InverseByModuleMM_ done [%i iterations].\n",(double)(clock() - begin) / CLOCKS_PER_SEC, profiler_extended_euclid_Iterations[0]);
InverseByModule ($N, M , $InverseByModule$NM);
printf("\n[%4.2f] $InverseByModule$NM done [%i iterations].\n",(double)(clock() - begin) / CLOCKS_PER_SEC, profiler_extended_euclid_Iterations[1]);
for( int i = 0; i < THREAD_NUMBER; i++){
ABmodN[0][i] = 1;
ABmodN[1][i] = 1;
mod_($N, Base[i], &N[i]);
mod_($N, Base_[i], &N_[i]);
div_(M, Base[i], Mi[i]);
div_(M_, Base_[i], Mi_[i]);
InverseByModule_(Mi[i], Base[i], &MiInv[i]);
InverseByModule_(Mi_[i], Base_[i], &Mi_Inv[i]);
mod_($SQR_M_MOD_N, Base[i], &SQR_M_MOD_N_RNS[i]);
mod_($SQR_M_MOD_N, Base_[i], &SQR_M_MOD_N_RNS_[i]);
mod_($InverseByModuleMM_, Base_[i], &InverseByModuleMM_[i]);
mod_($InverseByModule$NM, Base[i], &InverseByModuleNM[i]);
mod_(Mi_[i], Me, &Mi_InMe[i]);
mod_(Mi[i], Me, &MiInMe[i]);
mod_(M_, Base[i], &M_InBase[i]);
printf("\n[%4.2f] #%i thread computation done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC, i);
}
for (int i = 0; i < THREAD_NUMBER; i++){
for (int j = 0; j < THREAD_NUMBER; j++){
mod_( Mi [j], Base_ [i], &MiInBase_[i][j] );
mod_( Mi_ [j], Base [i], &Mi_InBase[i][j] );
}
}
printf("\n[%4.2f] &MiInBase_ and &Mi_InBase done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
mod_($N, Me, &N_Me);
clearLongVariable( temp );
//(M_^-1 mod Me) % Me;
unsigned int t;
InverseByModule_(M_, Me, &t);
temp[ MAX - 1] = t;
mod_(temp, Me, &InverseByModuleM_Me);
printf("\n[%4.2f] &InverseByModuleM_Me done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
//(( M_ * Me ) ^ -1 mod Me) % Me;
unsigned int tempM_Me[ MAX ];
clearLongVariable( tempM_Me );
clearLongVariable( temp );
temp[MAX - 1] = Me;
mul(M_, temp, tempM_Me);
clearLongVariable( temp );
InverseByModule(M, tempM_Me, temp);
mod_(temp, Me, &InverseByModuleMMMe);
printf("\n[%4.2f] &InverseByModuleMMMe done.\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
printf("\nTOTAL TIME SPENT: %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
}
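// Modular exponentiation (RSA encrypt/decrypt) by left-to-right square-and-multiply.
// Each squaring or multiplication is a pair of MM() calls: the second call multiplies by
// M^2 mod N, cancelling the M^-1 factor the first call introduces.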
int main(int argc, char **argv)
{
//argv[1] - input message
//argv[2] - action message
//argv[3] - method message
int mode; //encr/decr
int unit; //CPU/GPU
unsigned int $d[MAX];
unsigned int $e[MAX];
unsigned int q[MAX];
unsigned int p[MAX];
//unsigned int n[MAX];
unsigned int p_minus_1[MAX];
unsigned int q_minus_1[MAX];
unsigned int phi[MAX];
unsigned int temp[ MAX ];
unsigned int $input[ MAX ];
unsigned int R[THREAD_NUMBER], R_[THREAD_NUMBER];
clock_t begin;
cudaError_t cudaStatus;
char output [ MAX * 10 ];
mode = 2;
unit = 2;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
/*// n = p*q
mul ( p, q, n );
clearLongVariable ( _1 );
_1[ MAX - 1 ] = 1; // for storing digit "1"
clearLongVariable ( p_minus_1 );
clearLongVariable ( q_minus_1 );
// phi = (p-1)*(q-1);
sub ( p, _1, p_minus_1 );
sub ( q, _1, q_minus_1 );
mul ( p_minus_1, q_minus_1, phi );
*/
Me = 64;
/*
convertToNormalForm( "130ebebd67b16a9ab2c53a437badbf8f01a80c750095a7fcfe95742c3d5ed1abb318babc5cb5d9350fee4da65ee074f65e1758117e6945f0fcfc8137528053ce9d1da8618890dee24e5e0bf8c87795bb1d09eddd544640824ee0dd0ea9fd908d27b0f8a1ae5c37f3647fbf2f5795500ad76c195b3387d0458a8f51b701472301" , $N); // modulus
convertToNormalForm( "0123" , $input); // message
convertToNormalForm( "010001" , $e); // public exponenta
convertToNormalForm( "12e8da920d4599458e84ec5ef1656161807f427d05eb79182b7418259d6f6c14364d1f5caf9130c8d9d9d6ea71d1bdbc87781a46a16bcb9e672814fed3b9c96ddffe0a1b0955ae68055c8f92fef518a04fc32a2ea8390e617cc5556a251f9ae9eee70a32e579cb3e9f298848a9b3aaf634f5930ffbf74473f7cb6c0cefee1751" , $d); // secret exponenta
*/
convertToNormalForm( "025123" , $N); // modulus
convertToNormalForm( "01365D" , $e); // public exponenta
convertToNormalForm( "0AD" , $d) ; // secret exponenta
convertPowerToArrayOfBits($e, e, &eBitsCount, &numberOfIterationsForE);
convertPowerToArrayOfBits($d, d, &dBitsCount, &numberOfIterationsForD);
for (int i = 0; i < MAX; i++) {
ZERO_MEMORY[ i ] = 0;
}
for(int i=0; i<THREAD_NUMBER;i++){
Base[i] = 4294967296 - HHH[i];
Base_[i] = 4294967296 - KKK[i];
}
generateStaticData();
convertToNormalForm( argv[1] , $input); // message
for(int z=0; z< THREAD_NUMBER;z++) {
mod_($input, Base[z], &input[z]);
mod_($input, Base_[z], &input_[z]);
}
if( mode == 1 ){
//encrypt
iterationsCounter = 1;
currentOperationIndex = 0;
begin = clock();
for (int z = MAX * INT_SIZE - eBitsCount; z < MAX * INT_SIZE; z++){
MM(ABmodN[0], ABmodN[1], ABmodN[0], ABmodN[1], R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
if (e[z]){
MM(ABmodN[0], ABmodN[1], input, input_, R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
}
}
if(unit == 2)
printf("\nEncryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC / 15);
else
printf("\nEncryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
};
if( mode == 2 ){
//decrypt
iterationsCounter = 1;
currentOperationIndex = 1;
begin = clock();
for (int z = MAX * INT_SIZE - dBitsCount; z < MAX * INT_SIZE; z++){
MM(ABmodN[0], ABmodN[1], ABmodN[0], ABmodN[1], R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
if (d[z]){
MM(ABmodN[0], ABmodN[1], input, input_, R, R_);
MM(R, R_, SQR_M_MOD_N_RNS, SQR_M_MOD_N_RNS_, ABmodN[0], ABmodN[1]);
}
}
if(unit == 2)
printf("\nDecryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC / 15);
else
printf("\nDecryption ended in : %f s\n",(double)(clock() - begin) / CLOCKS_PER_SEC);
}
convertFromRNS(ABmodN[0], temp);
convertFromNormalForm( output, temp );
printf("%s\n", output);
showProfilerInfo();
/*
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
for (int i=0; i< 100000; i++) {
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
/* cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
*/
Error:
getch();
return 0;
}
|
9bcdd2c7d791f2ee4a5975f3f345057187f3f733.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#define NUM_ITERATIONS 10000
#define EPSILON 0.005
int NUM_PARTICLES = 10000;
int TPB = 32;
unsigned long get_time();
unsigned long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
unsigned long ret = tv.tv_usec;
ret /= 1000;
ret += (tv.tv_sec * 1000);
return ret;
}
struct Particle
{
float3 position;
float3 velocity;
};
float3 randomFloat3() {
float3 f;
f.x = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
f.y = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
f.z = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
return f;
}
// initialize the array of Particles with random positions and velocities
void inicializeParticles(Particle* particles){
for(int i = 0; i < NUM_PARTICLES ; i++){
particles[i].position = randomFloat3();
particles[i].velocity = randomFloat3();
}
}
// Initialize all the necessary random values at once in the array randNumbers
void inicializeRandNumbes(float3* randNumbers){
for(int i = 0; i < NUM_PARTICLES; i++) {
randNumbers[i] = randomFloat3();
}
}
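// One explicit Euler step per particle: position += dt * velocity, then the velocity is
// perturbed by a per-particle random increment (the same increment array is reused on
// every iteration of the benchmark).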
__global__ void performStepGPU(Particle* particles, float3* rand_vel_update, int NUM_PARTICLES, float dt=1.0)
{
const int p_id = blockIdx.x*blockDim.x + threadIdx.x;
// only calculate if particle is inside the bounds
if (p_id < NUM_PARTICLES){
particles[p_id].position.x += dt * particles[p_id].velocity.x;
particles[p_id].position.y += dt * particles[p_id].velocity.y;
particles[p_id].position.z += dt * particles[p_id].velocity.z;
particles[p_id].velocity.x += rand_vel_update[p_id].x;
particles[p_id].velocity.y += rand_vel_update[p_id].y;
particles[p_id].velocity.z += rand_vel_update[p_id].z;
}
}
void performStepCPU(Particle* particles, float3* rand_vel_update, float dt=1.0){
for (int p_id = 0; p_id < NUM_PARTICLES; p_id++){
particles[p_id].position.x += dt * particles[p_id].velocity.x;
particles[p_id].position.y += dt * particles[p_id].velocity.y;
particles[p_id].position.z += dt * particles[p_id].velocity.z;
particles[p_id].velocity.x += rand_vel_update[p_id].x;
particles[p_id].velocity.y += rand_vel_update[p_id].y;
particles[p_id].velocity.z += rand_vel_update[p_id].z;
}
}
bool equalFinalState(Particle* p1, Particle* p2){
for(int i = 0 ; i < NUM_PARTICLES; i++){
if (std::abs(p1[i].position.x - p2[i].position.x) > EPSILON ||
std::abs(p1[i].position.y - p2[i].position.y) > EPSILON ||
std::abs(p1[i].position.z - p2[i].position.z) > EPSILON){
return false;
}
}
return true;
}
int main(int argc, char** argv)
{
NUM_PARTICLES = (argc >= 2) ? atoi(argv[1]) : 20000;
TPB = argc >= 3 ? atoi(argv[2]) : 128;
// seed for random number
srand (static_cast <unsigned> (time(0)));
// Array of particles
Particle* particles = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
inicializeParticles(particles);
// Array of random numbers
float3* randNumbers = (float3*) malloc (sizeof(float3)*NUM_PARTICLES);
inicializeRandNumbes(randNumbers);
// CPU execution
long start_time_cpu = get_time();
Particle* particles_cpu = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
// copy vector to use in the CPU
std::memcpy(particles_cpu, particles, NUM_PARTICLES*sizeof(Particle));
printf("Computing particles system on the CPU");
for(int i = 0 ; i < NUM_ITERATIONS ; i++){
performStepCPU(particles_cpu, randNumbers);
}
printf("Done\n");
long end_time_cpu = get_time();
// GPU execution
long start_time_gpu = get_time();
Particle* particles_gpu = 0;
float3* randNumbers_gpu = 0;
Particle* particles_gpu_res = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
// Allocate device memory
hipMallocManaged(&particles_gpu, NUM_PARTICLES*sizeof(Particle));
hipMallocManaged(&randNumbers_gpu, NUM_PARTICLES*sizeof(float3));
// Initialize array:
for (int i = 0; i < NUM_PARTICLES; i++){
randNumbers_gpu[i] = randNumbers[i];
particles_gpu[i] = particles[i];
}
// Launch kernel to compute the final state of particles
printf("Computing particles system on the GPU...");
for(int i = 0 ; i < NUM_ITERATIONS ; i++){
// Advance every particle by one step on the device (the data lives in managed memory)
hipLaunchKernelGGL(( performStepGPU), dim3((NUM_PARTICLES+TPB-1)/TPB), dim3(TPB), 0, 0, particles_gpu, randNumbers_gpu, NUM_PARTICLES);
hipDeviceSynchronize();
}
printf("Done\n");
long end_time_gpu = get_time();
// Compare results
printf("Comparing the output for each implementation");
equalFinalState(particles_gpu_res, particles_cpu) ? printf("Correct\n") : printf("Uncorrect\n");
printf("-----------------------------------------------\n");
printf("block size: %d ; NUM_PARTICLES: %d\n", TPB, NUM_PARTICLES);
printf("CPU time: %ld ms\n", end_time_cpu-start_time_cpu);
printf("GPU time: %ld ms\n", end_time_gpu-start_time_gpu);
printf("-----------------------------------------------\n");
// printf("%d %d %ld %ld\n", TPB, NUM_PARTICLES, end_time_cpu - start_time_cpu, end_time_gpu - start_time_gpu);
// Free the memory
hipFree(particles_gpu);
hipFree(randNumbers_gpu);
free(particles_cpu);
free(particles_gpu_res);
free(randNumbers);
free(particles);
hipDeviceReset();
return 0;
} | 9bcdd2c7d791f2ee4a5975f3f345057187f3f733.cu | #include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#define NUM_ITERATIONS 10000
#define EPSILON 0.005
int NUM_PARTICLES = 10000;
int TPB = 32;
unsigned long get_time();
unsigned long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
unsigned long ret = tv.tv_usec;
ret /= 1000;
ret += (tv.tv_sec * 1000);
return ret;
}
struct Particle
{
float3 position;
float3 velocity;
};
float3 randomFloat3() {
float3 f;
f.x = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
f.y = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
f.z = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
return f;
}
// initialize the array of Particles with random positions and velocities
void inicializeParticles(Particle* particles){
for(int i = 0; i < NUM_PARTICLES ; i++){
particles[i].position = randomFloat3();
particles[i].velocity = randomFloat3();
}
}
// Initialize all the necessary random values at once in the array randNumbers
void inicializeRandNumbes(float3* randNumbers){
for(int i = 0; i < NUM_PARTICLES; i++) {
randNumbers[i] = randomFloat3();
}
}
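// One explicit Euler step per particle: position += dt * velocity, then the velocity is
// perturbed by a per-particle random increment (the same increment array is reused on
// every iteration of the benchmark).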
__global__ void performStepGPU(Particle* particles, float3* rand_vel_update, int NUM_PARTICLES, float dt=1.0)
{
const int p_id = blockIdx.x*blockDim.x + threadIdx.x;
// only calculate if particle is inside the bounds
if (p_id < NUM_PARTICLES){
particles[p_id].position.x += dt * particles[p_id].velocity.x;
particles[p_id].position.y += dt * particles[p_id].velocity.y;
particles[p_id].position.z += dt * particles[p_id].velocity.z;
particles[p_id].velocity.x += rand_vel_update[p_id].x;
particles[p_id].velocity.y += rand_vel_update[p_id].y;
particles[p_id].velocity.z += rand_vel_update[p_id].z;
}
}
void performStepCPU(Particle* particles, float3* rand_vel_update, float dt=1.0){
for (int p_id = 0; p_id < NUM_PARTICLES; p_id++){
particles[p_id].position.x += dt * particles[p_id].velocity.x;
particles[p_id].position.y += dt * particles[p_id].velocity.y;
particles[p_id].position.z += dt * particles[p_id].velocity.z;
particles[p_id].velocity.x += rand_vel_update[p_id].x;
particles[p_id].velocity.y += rand_vel_update[p_id].y;
particles[p_id].velocity.z += rand_vel_update[p_id].z;
}
}
bool equalFinalState(Particle* p1, Particle* p2){
for(int i = 0 ; i < NUM_PARTICLES; i++){
if (std::abs(p1[i].position.x - p2[i].position.x) > EPSILON ||
std::abs(p1[i].position.y - p2[i].position.y) > EPSILON ||
std::abs(p1[i].position.z - p2[i].position.z) > EPSILON){
return false;
}
}
return true;
}
int main(int argc, char** argv)
{
NUM_PARTICLES = (argc >= 2) ? atoi(argv[1]) : 20000;
TPB = argc >= 3 ? atoi(argv[2]) : 128;
// seed for random number
srand (static_cast <unsigned> (time(0)));
// Array of particles
Particle* particles = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
inicializeParticles(particles);
// Array of random numbers
float3* randNumbers = (float3*) malloc (sizeof(float3)*NUM_PARTICLES);
inicializeRandNumbes(randNumbers);
// CPU execution
long start_time_cpu = get_time();
Particle* particles_cpu = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
// copy vector to use in the CPU
std::memcpy(particles_cpu, particles, NUM_PARTICLES*sizeof(Particle));
printf("Computing particles system on the CPU…");
for(int i = 0 ; i < NUM_ITERATIONS ; i++){
performStepCPU(particles_cpu, randNumbers);
}
printf("Done\n");
long end_time_cpu = get_time();
// GPU execution
long start_time_gpu = get_time();
Particle* particles_gpu = 0;
float3* randNumbers_gpu = 0;
Particle* particles_gpu_res = (Particle*) malloc (sizeof(Particle)*NUM_PARTICLES);
// Allocate device memory
cudaMallocManaged(&particles_gpu, NUM_PARTICLES*sizeof(Particle));
cudaMallocManaged(&randNumbers_gpu, NUM_PARTICLES*sizeof(float3));
// Initialize array:
for (int i = 0; i < NUM_PARTICLES; i++){
randNumbers_gpu[i] = randNumbers[i];
particles_gpu[i] = particles[i];
}
// Launch kernel to compute the final state of particles
printf("Computing particles system on the GPU...");
for(int i = 0 ; i < NUM_ITERATIONS ; i++){
// Advance every particle by one step on the device (the data lives in managed memory)
performStepGPU<<<(NUM_PARTICLES+TPB-1)/TPB, TPB>>>(particles_gpu, randNumbers_gpu, NUM_PARTICLES);
cudaDeviceSynchronize();
}
printf("Done\n");
long end_time_gpu = get_time();
// Compare results
printf("Comparing the output for each implementation…");
// particles_gpu is managed memory, so the device results can be read directly on the host here
equalFinalState(particles_gpu, particles_cpu) ? printf("Correct\n") : printf("Incorrect\n");
printf("-----------------------------------------------\n");
printf("block size: %d ; NUM_PARTICLES: %d\n", TPB, NUM_PARTICLES);
printf("CPU time: %ld ms\n", end_time_cpu-start_time_cpu);
printf("GPU time: %ld ms\n", end_time_gpu-start_time_gpu);
printf("-----------------------------------------------\n");
// printf("%d %d %ld %ld\n", TPB, NUM_PARTICLES, end_time_cpu - start_time_cpu, end_time_gpu - start_time_gpu);
// Free the memory
cudaFree(particles_gpu);
cudaFree(randNumbers_gpu);
free(particles_cpu);
free(particles_gpu_res);
free(randNumbers);
free(particles);
cudaDeviceReset();
return 0;
} |
b294f5800118e6f6a8c1d63b50d60f69dacab2ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
//#include "cutil.h"
#include "hip/hip_runtime.h"
#include <stdlib.h>
//#include "cusp/complex.h"
#include <cusp/complex.h>
#include <cusp/blas.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
//#include "mxGPUArray.h"
#include "polargrid.h"
/*
__inline__ __device__ void atomicAdd( cusp::complex<float> * x, cusp::complex<float> m)
{
atomicAdd(&(x[0]).x,m.x);
atomicAdd(&(x[0]).y,m.y);
}
*/
float cpu_kb_weight(float2 grid_pos, float2 point_pos,
float * kb_table,
int kb_table_size,
float kb_table_scale){
float dist_x = fabs(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabs(grid_pos.y-point_pos.y)*kb_table_scale;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (kb_table[ix]*(1.0f-fx) + kb_table[ix+1]*(fx)) *
(kb_table[iy]*(1.0f-fy) + kb_table[iy+1]*(fy));
}
return 0.0f;
}
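// CPU reference ("gold") gridder: copies every input back to the host and, bin by bin,
// accumulates each sample's value into the grid points of that bin, weighted by the
// Kaiser-Bessel lookup table; used only to validate the GPU kernel output.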
void grid_points_gold(const float * d_point_pos_x, const float * d_point_pos_y, const cusp::complex<float> * d_point_value,
const int npoints, const uint2 grid_size, const int * d_points_per_bin,const int * d_binned_points,
const int * d_binned_points_idx, const int * d_bin_location, const int * d_bin_dim_x,
const int * d_bin_dim_y,int nbins, const float * d_kb_table, int kb_table_size,
float kb_table_scale,
cusp::complex<float> * d_grid_value){
/* almost every input arrives as a device pointer, so copy them back to host memory first */
float * point_pos_x = new float[npoints];
hipMemcpy(point_pos_x,d_point_pos_x,sizeof(float)*npoints,hipMemcpyDeviceToHost);
float * point_pos_y = new float[npoints];
hipMemcpy(point_pos_y,d_point_pos_y,sizeof(float)*npoints,hipMemcpyDeviceToHost);
cusp::complex<float> * point_value = new cusp::complex<float>[npoints];
hipMemcpy(point_value,d_point_value,sizeof(cusp::complex<float>)*npoints,hipMemcpyDeviceToHost);
int * points_per_bin = new int[nbins];
hipMemcpy(points_per_bin,d_points_per_bin,sizeof(int)*nbins,hipMemcpyDeviceToHost);
int * binned_points_idx = new int[nbins];
hipMemcpy(binned_points_idx,d_binned_points_idx,sizeof(int)*nbins,hipMemcpyDeviceToHost);
int total_size = 0;
for(int i = 0;i<nbins;i++){
total_size+= points_per_bin[i];
total_size = 32*((total_size+31)/32);
}
int * binned_points = new int[total_size];
hipMemcpy(binned_points,d_binned_points,sizeof(int)*total_size,hipMemcpyDeviceToHost);
int * bin_location = new int[nbins];
hipMemcpy(bin_location,d_bin_location,sizeof(int)*nbins,hipMemcpyDeviceToHost);
int * bin_dim_x = new int[nbins];
hipMemcpy(bin_dim_x,d_bin_dim_x,sizeof(int)*nbins,hipMemcpyDeviceToHost);
int * bin_dim_y = new int[nbins];
hipMemcpy(bin_dim_y,d_bin_dim_y,sizeof(int)*nbins,hipMemcpyDeviceToHost);
cusp::complex<float> * grid_value = new cusp::complex<float>[grid_size.x*grid_size.y];
memset(grid_value,0,sizeof(cusp::complex<float>)*grid_size.x*grid_size.y);
float * kb_table = new float[kb_table_size];
hipMemcpy(kb_table,d_kb_table,sizeof(float)*kb_table_size,hipMemcpyDeviceToHost);
for(int i = 0;i<nbins;i++){
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
for(int y = corner.y;y<corner.y+bin_dim_y[i];y++){
for(int x = corner.x;x<corner.x+bin_dim_x[i];x++){
grid_value[y*grid_size.x+x] = 0;
for(int j = 0;j<points_per_bin[i];j++){
grid_value[y*grid_size.x+x] += point_value[binned_points[idx+j]]*
cpu_kb_weight(make_float2(x,y),
make_float2(point_pos_x[binned_points[idx+j]],
point_pos_y[binned_points[idx+j]]),
kb_table,
kb_table_size,
kb_table_scale);
}
}
}
}
hipMemcpy(d_grid_value,grid_value,sizeof(cusp::complex<float>)*grid_size.x*grid_size.y,hipMemcpyHostToDevice);
}
//---------
texture<float, 1, hipReadModeElementType> texRef;
texture<int,1> tex_x_int;
texture<float,1> tex_x_float;
texture<float,1> tex_x_float1;
__inline__ __device__ cusp::complex<float> fetch_x(const int& i, const cusp::complex<float> * x)
{
return cusp::complex<float>(tex1Dfetch(tex_x_float, i*2),tex1Dfetch(tex_x_float, i*2+1));
}
__inline__ __device__ float fetch_x(const int& i,const float * x)
{
return tex1Dfetch(tex_x_float1, i);
}
void error_handle(hipError_t status = hipErrorLaunchFailure);
void error_handle(hipError_t status){
if(status != hipSuccess){
hipError_t s= hipGetLastError();
if(s != hipSuccess){
// printf("%s\n",hipGetErrorString(s));
exit(1);
}
}
}
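// Device-side kernel weight: separable 1D Kaiser-Bessel lookup with linear interpolation
// between adjacent table entries, fetched through the tex_x_float1 texture (bound to the
// lookup table in grid_points_cuda_interleaved_mex below).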
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size, float kb_table_scale, const float * kb_table){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
//float w=tex1D<float>(texRef,0.0f);
// return w;//tex1D<float>(texRef,dist_y);// *tex1D<float>(texRef,dist_y);
// return 1.0f;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
// return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
// (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
}
return 0.0f;
/* */
}
__device__ float kb_weight(float grid_x, float grid_y, float point_pos_x,
float point_pos_y,
int kb_table_size,
float kb_table_scale, const float * kb_table){
float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale;
float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
/*
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
*/
}
return 0.0f;
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size,
float kb_table_scale, const float * kb_table,int tid){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// return 0.0f;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
float ix = rintf(dist_x);
float fx = dist_x-ix;
float iy = rintf(dist_y);
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
/*
return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) *
(tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy));
*/
}
return 0.0f;
}
/*
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void sum_points( const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, const float * kb_table, hipTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
int jj = blockIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int bdx=bin_dimension_x[i];
// loop through grid
for(int yi = corner.y;yi<corner.y+bin_dimension_x[i];yi+=1){
int y=(yi-corner.y+jj)%bdx+corner.y; //shift so that there is no overlap
// int y=yi;
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
for(int j = tid+jj*bd ;j<ppb;j+=bd*gridDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale, kb_table,texRef);
}
__syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
// hipDeviceSynchronize();
if(tid == 0){
// grid_value[y*grid_size.x+x]+=(cusp::complex<float>) sum_t[0];
atomicAdd(&(grid_value[y*grid_size.x+x]),(sum_t[0]));
}
}
}
}
*/
//
// __device__ float kb_weight(float2 grid_pos, float2 point_pos,
// float * kb_table, int kb_table_size,
// float kb_table_scale){
// float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
// float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// int ix = (int)dist_x;
// float fx = dist_x-rintf(dist_x);
// int iy = (int)dist_y;
// float fy = dist_y-rintf(dist_y);
//
// if(ix+1 < kb_table_size && iy+1 < kb_table_size){
// return (kb_table[ix]*(1.0f-fx) + kb_table[ix+1]*(fx)) *
// (kb_table[iy]*(1.0f-fy) + kb_table[iy+1]*(fy));
// }
// return 0.0f;
// }
/*
__global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
int nbins,
int kb_table_size,
float kb_table_scale,
cusp::complex<float> * grid_value){
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
//typedef hipcub::BlockReduce<cusp::complex<float>, 128> BlockReduce;
// Allocate shared memory for BlockReduce
//__shared__ typename BlockReduce::TempStorage temp_storage;
//int aggregate = BlockReduce(temp_storage).Sum(thread_data);
int i = blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
sum_t[tid] = 0;
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
// loop through points
for(int j = tid;j<points_per_bin[i];j+=blockDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale);
}
// Do a reduce in shared memory //
for(unsigned int j=1; j < blockDim.x; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if(tid == 0){
grid_value[y*grid_size.x+x] = sum_t[0];
}
}
}
// large dimensions
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
// Lets try to load all points to shared memory /
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] = my_sum;
}
}else{ //small dimension and few points
// Lets try to load things to shared memory /
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale);
sum_t[tid] += point_value_cache[j]*w;
}
// Do a reduce in shared memory
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
}
*/
//------------------------------
//point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
// make_float2(binned_points_x[idx+j],
// binned_points_y[idx+j]),
// kb_table_size,kb_table_scale);
//call=
//transform_4in1out( binned_points_x,binned_points_y,binned_points,point_value,make_float2(x,y), kb_table_size,kb_table_scale);
/*
template <typename IN, typename OUT>
struct KBMUL
{
float xs;
float ys;
int kb_table_size;
float kb_table_scale;
KBMUL(float _xs, float _ys){
xs = _xs;
ys= _ys;
kb_table_size=_kb_table_size;
kb_table_scale=_kb_table_scale;
}
template <typename Tuple>
__host__ __device__
OUT operator()(Tuple x)
{
// OUT out;
IN point_value= thrust::get<0>(x);
float binned_points_x= thrust::get<1>(x);
float binned_points_y= thrust::get<2>(x);
OUT ret = point_value[binned_points]*kb_weight(make_float2(xs,ys),
make_float2(binned_points_x,binned_points_y),
kb_table_size,kb_table_scale);
return ret*ret;
}
};
*/
//------------------------
/*
* sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale);
* template <typename T1,typename T2>
* struct AbsSubtract2 : public thrust::unary_function<T1,T2>
{
__host__ __device__
T2 operator()(T1 x)
{
T2 ret = abs(abs(thrust::get<0>(x))-(thrust::get<1>(x)));
return ret*ret;
}
};
*/
//=========================
/*
template<typename IN,typename OUT>
void transform_4in_1out(float * BINNED_POINTS_X, float * BINNED_POINTS_Y, int BINNED_POINTS, IN
* POINT_VALUE, float2 * POSITIONS,
int kb_table_size, float kb_table_scale, float * KBTABLE, OUT * derr, int N){
thrust::device_ptr<float> d_binned_points_x(BINNED_POINTS_X);
thrust::device_ptr<float> d_binned_points_y(BINNED_POINTS_Y);
thrust::device_ptr<int> d_binned_points(BINNED_POINTS);
thrust::device_ptr<float> d_value(POINT_VALUE);
thrust::device_ptr<float> d_positions(POSITIONS);
}
// transform_3in_2out(d_G,d_DG,d_a, (float ) tau, &der,&d2er,n);
// transform_3in_1out(d_G,d_DG,d_a, (float ) tau, &der,n);
template<typename IN,typename OUT>
// void transform_3in_2out(IN * G, IN * dG, float * F, float tau, OUT * derr, OUT * d2err, int N){
void transform_3in_2out(IN * G, IN * dG, float * F, float tau, OUT * derr, OUT * d2err, int N){
thrust::device_ptr<IN> d_G(G);
thrust::device_ptr<IN> d_dG(dG);
thrust::device_ptr<float> d_F(F);
thrust::tuple<OUT,OUT> init;
thrust::tuple<OUT,OUT> out = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_G, d_dG, d_F)),
thrust::make_zip_iterator(thrust::make_tuple(d_G, d_dG, d_F))+N,
DIR<IN,OUT>(tau),
init,
TUPLE_PLUS<thrust::tuple<OUT,OUT> >());
*derr = thrust::get<0>(out)*2;
*d2err = thrust::get<1>(out)*2;
}
*/
//--------------------------------
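// Gridding kernel: one thread block per bin, with three per-bin strategies:
// (1) small bins, or bins with more samples than fit in shared memory: the whole block
//     reduces over the samples for one grid point at a time;
// (2) large bins: all samples are cached in shared memory and each thread owns a grid point;
// (3) small bins with few samples: groups of 4 threads share a grid point and reduce partially.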
__global__ void grid_points_cuda_mex_interleaved_kernel1(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
int nbins,
int kb_table_size,
float kb_table_scale, const float * kb_table,
cusp::complex<float> * grid_value){
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
int i = blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
// sum_points<<<1,BLOCKSIZE>>> (point_value,binned_points,binned_points_x,binned_points_y,idx,points_per_bin[idx],x,y,kb_table_size,kb_table_scale, value);
// cusp::complex<float> value[1];
//grid_value[y*grid_size.x+x]=0;
// sum_points<<<1,BLOCKSIZE>>> (point_value,binned_points,binned_points_x,binned_points_y,idx,ppb,x,y,kb_table_size,kb_table_scale,grid_value+y*grid_size.x+x);
sum_t[tid] = 0;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
//typedef hipcub::BlockReduce<cusp::complex<float>, 128> BlockReduce;
// Allocate shared memory for BlockReduce
//__shared__ typename BlockReduce::TempStorage temp_storage;
// grid_value[y*grid_size.x+x]= BlockReduce(temp_storage).Sum(thread_data);
// for(int item=0; item<ITEMS_PER_THREAD; ++item)
// data[item] = unaryOp(data[item]);
sum_t[tid] = 0;
// loop through points
for(int j = tid;j<ppb;j+=blockDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale,kb_table );
}
// Do a reduce in shared memory
for(unsigned int j=1; j < blockDim.x; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if(tid == 0){
grid_value[y*grid_size.x+x] = sum_t[0];
}
}
}
// large dimensions
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
/* Lets try to load all points to shared memory */
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale, kb_table);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] = my_sum;
}
}else{ //small dimension and few points
/* Lets try to load things to shared memory */
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,kb_table);
sum_t[tid] += point_value_cache[j]*w;
}
/* Do a reduce in shared memory */
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
}
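// Host wrapper: clears the output grid, binds the lookup-table texture and launches
// one BLOCKSIZE-thread block per bin.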
void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y,
const cusp::complex<float> * point_value, int npoints,
uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points, const int * binned_points_idx, const int * bin_location,
const float * binned_points_x, const float * binned_points_y,
int nbins,
const float * kb_table,
int kb_table_size,
float kb_table_scale,
cusp::complex<float> * grid_value){
hipMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y);
size_t offset;
hipBindTexture(&offset,texRef, kb_table, sizeof(float)*kb_table_size);
if(offset != 0){
// printf("Error: Texture offset different than zero. Table not allocated with hipMalloc!%d\n");
return;
}
int grid = nbins;
int block_size = BLOCKSIZE;
clock_t t_i = clock();
hipLaunchKernelGGL(( grid_points_cuda_mex_interleaved_kernel1), dim3(grid),dim3(block_size), 0, 0, point_pos_x, point_pos_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale, kb_table, grid_value);
hipDeviceSynchronize();
clock_t t_e = clock();
error_handle();
// printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC);
}
void compare_to_gold(float * gridded, float * gold_gridded, uint2 grid_size){
for(int i =0;i<grid_size.x*grid_size.y;i++){
if(fabs(gridded[i]-gold_gridded[i])/gridded[i] > 1e-5 &&
fabs(gridded[i]-gold_gridded[i]) > 1e-7){
printf("cuda[%d] = %e gold[%d] = %e\n",i,gridded[i],i,gold_gridded[i]);
exit(1);
}
}
}
//---------
#define SX prhs[0]
#define SY prhs[1]
#define SV prhs[2]
#define GRID_DIM prhs[3]
#define SPB prhs[4]
#define BIN_DIM_X prhs[5]
#define BIN_DIM_Y prhs[6]
#define SIB prhs[7]
#define BSO prhs[8]
#define BL prhs[9]
#define BPX prhs[10]
#define BPY prhs[11]
#define KLUT prhs[12]
#define KLUTS prhs[13]
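// MEX gateway: takes the sample coordinates/values, the precomputed binning structures and
// the Kaiser-Bessel lookup table as gpuArray inputs (see the prhs #defines above) and returns
// the gridded complex image; a second output, if requested, is filled by the CPU gold
// implementation for comparison.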
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mxGPUArray const *samples_x;
mxGPUArray const *samples_y;
mxGPUArray const *samples_values;
mxGPUArray const *samples_per_bin;
mxGPUArray const *bin_dimensions_x;
mxGPUArray const *bin_dimensions_y;
mxGPUArray const *samples_in_bin;
mxGPUArray const *bin_start_offset;
mxGPUArray const *bin_location;
mxGPUArray const *bin_points_x;
mxGPUArray const *bin_points_y;
mxGPUArray const *kernel_lookup_table;
//int *grid_dim =(int *) mxGetPr(GRID_DIM);
float kernel_lookup_table_scale = mxGetScalar(KLUTS);
int *grid_dim0=( int *) (mxGetData(GRID_DIM));
mwSize *grid_dim=(mwSize *)grid_dim0;
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1]));
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1]));
// OUTPUT
mxGPUArray *grid_values, *gold_grid_values;
samples_x = mxGPUCreateFromMxArray(SX);
samples_y = mxGPUCreateFromMxArray(SY);
samples_values = mxGPUCreateFromMxArray(SV);
samples_per_bin = mxGPUCreateFromMxArray(SPB);
bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X);
bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y);
samples_in_bin = mxGPUCreateFromMxArray(SIB);
bin_start_offset = mxGPUCreateFromMxArray(BSO);
bin_location = mxGPUCreateFromMxArray(BL);
bin_points_x = mxGPUCreateFromMxArray(BPX);
bin_points_y = mxGPUCreateFromMxArray(BPY);
kernel_lookup_table= mxGPUCreateFromMxArray(KLUT);
int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x));
int npoints = (int)(mxGPUGetNumberOfElements(samples_x));
int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table));
mwSize ndim= 2;
// mwSize *grid_dim1[]={(mwSize grid_dim[0]), }
// output:
// float2 * grid_values;
// float2 * gold_grid_values;
// plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,);
//grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
// now get the pointer or whatever it is
const float *d_samples_x = (const float *)(mxGPUGetDataReadOnly(samples_x));
const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y));
// float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values));
const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values));
const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin));
const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x));
const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y));
const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin));
const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset));
const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location));
const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x));
const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y));
const float * d_kernel_lookup_table = (const float *)(mxGPUGetDataReadOnly(kernel_lookup_table));
const uint2 grid_size = {grid_dim[0],grid_dim[1]};
//float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values));
cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values));
// mexErrMsgTxt("gpuArray 1");
grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y,
d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_points_x, d_bin_points_y,
nbins, d_kernel_lookup_table,
kernel_lookup_table_size,
kernel_lookup_table_scale,
d_grid_values);
//mexErrMsgTxt("gpuArray 2");
plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values);
if(nlhs == 2){
//gold_grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
gold_grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
//gold_grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
//float2 * d_gold_grid_values = (float2 *)(mxGPUGetData(gold_grid_values));
cusp::complex<float> * d_gold_grid_values = ( cusp::complex<float> *)(mxGPUGetData(gold_grid_values));
grid_points_gold (d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_dimensions_x, d_bin_dimensions_y,nbins, d_kernel_lookup_table, kernel_lookup_table_size,
kernel_lookup_table_scale,d_gold_grid_values);
plhs[1] = mxGPUCreateMxArrayOnGPU(gold_grid_values);
mxGPUDestroyGPUArray( gold_grid_values);
}
mxGPUDestroyGPUArray( samples_x);
mxGPUDestroyGPUArray( samples_y);
mxGPUDestroyGPUArray( samples_values);
mxGPUDestroyGPUArray( samples_per_bin);
mxGPUDestroyGPUArray( bin_dimensions_x);
mxGPUDestroyGPUArray( bin_dimensions_y);
mxGPUDestroyGPUArray( samples_in_bin);
mxGPUDestroyGPUArray( kernel_lookup_table);
mxGPUDestroyGPUArray( bin_start_offset);
mxGPUDestroyGPUArray( bin_location);
mxGPUDestroyGPUArray( bin_points_x);
mxGPUDestroyGPUArray( bin_points_y);
mxGPUDestroyGPUArray( grid_values);
}
| b294f5800118e6f6a8c1d63b50d60f69dacab2ee.cu | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
//#include "cutil.h"
#include "cuda.h"
#include <stdlib.h>
//#include "cusp/complex.h"
#include <cusp/complex.h>
#include <cusp/blas.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
//#include "mxGPUArray.h"
#include "polargrid.h"
/*
__inline__ __device__ void atomicAdd( cusp::complex<float> * x, cusp::complex<float> m)
{
atomicAdd(&(x[0]).x,m.x);
atomicAdd(&(x[0]).y,m.y);
}
*/
float cpu_kb_weight(float2 grid_pos, float2 point_pos,
float * kb_table,
int kb_table_size,
float kb_table_scale){
float dist_x = fabs(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabs(grid_pos.y-point_pos.y)*kb_table_scale;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (kb_table[ix]*(1.0f-fx) + kb_table[ix+1]*(fx)) *
(kb_table[iy]*(1.0f-fy) + kb_table[iy+1]*(fy));
}
return 0.0f;
}
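// CPU reference ("gold") gridder: copies every input back to the host and, bin by bin,
// accumulates each sample's value into the grid points of that bin, weighted by the
// Kaiser-Bessel lookup table; used only to validate the GPU kernel output.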
void grid_points_gold(const float * d_point_pos_x, const float * d_point_pos_y, const cusp::complex<float> * d_point_value,
const int npoints, const uint2 grid_size, const int * d_points_per_bin,const int * d_binned_points,
const int * d_binned_points_idx, const int * d_bin_location, const int * d_bin_dim_x,
const int * d_bin_dim_y,int nbins, const float * d_kb_table, int kb_table_size,
float kb_table_scale,
cusp::complex<float> * d_grid_value){
/* almost every input arrives as a device pointer, so copy them back to host memory first */
float * point_pos_x = new float[npoints];
cudaMemcpy(point_pos_x,d_point_pos_x,sizeof(float)*npoints,cudaMemcpyDeviceToHost);
float * point_pos_y = new float[npoints];
cudaMemcpy(point_pos_y,d_point_pos_y,sizeof(float)*npoints,cudaMemcpyDeviceToHost);
cusp::complex<float> * point_value = new cusp::complex<float>[npoints];
cudaMemcpy(point_value,d_point_value,sizeof(cusp::complex<float>)*npoints,cudaMemcpyDeviceToHost);
int * points_per_bin = new int[nbins];
cudaMemcpy(points_per_bin,d_points_per_bin,sizeof(int)*nbins,cudaMemcpyDeviceToHost);
int * binned_points_idx = new int[nbins];
cudaMemcpy(binned_points_idx,d_binned_points_idx,sizeof(int)*nbins,cudaMemcpyDeviceToHost);
int total_size = 0;
for(int i = 0;i<nbins;i++){
total_size+= points_per_bin[i];
total_size = 32*((total_size+31)/32);
}
int * binned_points = new int[total_size];
cudaMemcpy(binned_points,d_binned_points,sizeof(int)*total_size,cudaMemcpyDeviceToHost);
int * bin_location = new int[nbins];
cudaMemcpy(bin_location,d_bin_location,sizeof(int)*nbins,cudaMemcpyDeviceToHost);
int * bin_dim_x = new int[nbins];
cudaMemcpy(bin_dim_x,d_bin_dim_x,sizeof(int)*nbins,cudaMemcpyDeviceToHost);
int * bin_dim_y = new int[nbins];
cudaMemcpy(bin_dim_y,d_bin_dim_y,sizeof(int)*nbins,cudaMemcpyDeviceToHost);
cusp::complex<float> * grid_value = new cusp::complex<float>[grid_size.x*grid_size.y];
memset(grid_value,0,sizeof(cusp::complex<float>)*grid_size.x*grid_size.y);
float * kb_table = new float[kb_table_size];
cudaMemcpy(kb_table,d_kb_table,sizeof(float)*kb_table_size,cudaMemcpyDeviceToHost);
for(int i = 0;i<nbins;i++){
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
for(int y = corner.y;y<corner.y+bin_dim_y[i];y++){
for(int x = corner.x;x<corner.x+bin_dim_x[i];x++){
grid_value[y*grid_size.x+x] = 0;
for(int j = 0;j<points_per_bin[i];j++){
grid_value[y*grid_size.x+x] += point_value[binned_points[idx+j]]*
cpu_kb_weight(make_float2(x,y),
make_float2(point_pos_x[binned_points[idx+j]],
point_pos_y[binned_points[idx+j]]),
kb_table,
kb_table_size,
kb_table_scale);
}
}
}
}
cudaMemcpy(d_grid_value,grid_value,sizeof(cusp::complex<float>)*grid_size.x*grid_size.y,cudaMemcpyHostToDevice);
}
//---------
texture<float, 1, cudaReadModeElementType> texRef;
texture<int,1> tex_x_int;
texture<float,1> tex_x_float;
texture<float,1> tex_x_float1;
__inline__ __device__ cusp::complex<float> fetch_x(const int& i, const cusp::complex<float> * x)
{
return cusp::complex<float>(tex1Dfetch(tex_x_float, i*2),tex1Dfetch(tex_x_float, i*2+1));
}
__inline__ __device__ float fetch_x(const int& i,const float * x)
{
return tex1Dfetch(tex_x_float1, i);
}
void error_handle(cudaError_t status = cudaErrorLaunchFailure);
void error_handle(cudaError_t status){
if(status != cudaSuccess){
cudaError_t s= cudaGetLastError();
if(s != cudaSuccess){
// printf("%s\n",cudaGetErrorString(s));
exit(1);
}
}
}
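// Device-side kernel weight: separable 1D Kaiser-Bessel lookup with linear interpolation
// between adjacent table entries, fetched through the tex_x_float1 texture.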
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size, float kb_table_scale, const float * kb_table){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
//float w=tex1D<float>(texRef,0.0f);
// return w;//tex1D<float>(texRef,dist_y);// *tex1D<float>(texRef,dist_y);
// return 1.0f;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
// return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
// (tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
}
return 0.0f;
/* */
}
__device__ float kb_weight(float grid_x, float grid_y, float point_pos_x,
float point_pos_y,
int kb_table_size,
float kb_table_scale, const float * kb_table){
float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale;
float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
/*
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
*/
}
return 0.0f;
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size,
float kb_table_scale, const float * kb_table,int tid){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// return 0.0f;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
float ix = rintf(dist_x);
float fx = dist_x-ix;
float iy = rintf(dist_y);
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (fetch_x(ix,kb_table)*(1.0f-fx)+ fetch_x(ix+1,kb_table)*(fx))*
(fetch_x(iy,kb_table)*(1.0f-fy)+ fetch_x(iy+1,kb_table)*(fy));
/*
return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) *
(tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy));
*/
}
return 0.0f;
}
/*
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void sum_points( const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, const float * kb_table, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
int jj = blockIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int bdx=bin_dimension_x[i];
// loop through grid
for(int yi = corner.y;yi<corner.y+bin_dimension_x[i];yi+=1){
int y=(yi-corner.y+jj)%bdx+corner.y; //shift so that there is no overlap
// int y=yi;
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
for(int j = tid+jj*bd ;j<ppb;j+=bd*gridDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale, kb_table,texRef);
}
__syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
// cudaDeviceSynchronize();
if(tid == 0){
// grid_value[y*grid_size.x+x]+=(cusp::complex<float>) sum_t[0];
atomicAdd(&(grid_value[y*grid_size.x+x]),(sum_t[0]));
}
}
}
}
*/
//
// __device__ float kb_weight(float2 grid_pos, float2 point_pos,
// float * kb_table, int kb_table_size,
// float kb_table_scale){
// float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
// float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// int ix = (int)dist_x;
// float fx = dist_x-rintf(dist_x);
// int iy = (int)dist_y;
// float fy = dist_y-rintf(dist_y);
//
// if(ix+1 < kb_table_size && iy+1 < kb_table_size){
// return (kb_table[ix]*(1.0f-fx) + kb_table[ix+1]*(fx)) *
// (kb_table[iy]*(1.0f-fy) + kb_table[iy+1]*(fy));
// }
// return 0.0f;
// }
/*
__global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
int nbins,
int kb_table_size,
float kb_table_scale,
cusp::complex<float> * grid_value){
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
//typedef cub::BlockReduce<cusp::complex<float>, 128> BlockReduce;
// Allocate shared memory for BlockReduce
//__shared__ typename BlockReduce::TempStorage temp_storage;
//int aggregate = BlockReduce(temp_storage).Sum(thread_data);
int i = blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
sum_t[tid] = 0;
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
// loop through points
for(int j = tid;j<points_per_bin[i];j+=blockDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale);
}
// Do a reduce in shared memory //
for(unsigned int j=1; j < blockDim.x; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if(tid == 0){
grid_value[y*grid_size.x+x] = sum_t[0];
}
}
}
// large dimensions
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
// Lets try to load all points to shared memory /
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] = my_sum;
}
}else{ //small dimension and few points
// Lets try to load things to shared memory /
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale);
sum_t[tid] += point_value_cache[j]*w;
}
// Do a reduce in shared memory
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
}
*/
//------------------------------
//point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
// make_float2(binned_points_x[idx+j],
// binned_points_y[idx+j]),
// kb_table_size,kb_table_scale);
//call=
//transform_4in1out( binned_points_x,binned_points_y,binned_points,point_value,make_float2(x,y), kb_table_size,kb_table_scale);
/*
template <typename IN, typename OUT>
struct KBMUL
{
float xs;
float ys;
int kb_table_size;
float kb_table_scale;
KBMUL(float _xs, float _ys){
xs = _xs;
ys= _ys;
kb_table_size=_kb_table_size;
kb_table_scale=_kb_table_scale;
}
template <typename Tuple>
__host__ __device__
OUT operator()(Tuple x)
{
// OUT out;
IN point_value= thrust::get<0>(x);
float binned_points_x= thrust::get<1>(x);
float binned_points_y= thrust::get<2>(x);
OUT ret = point_value[binned_points]*kb_weight(make_float2(xs,ys),
make_float2(binned_points_x,binned_points_y),
kb_table_size,kb_table_scale);
return ret*ret;
}
};
*/
//------------------------
/*
* sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale);
* template <typename T1,typename T2>
* struct AbsSubtract2 : public thrust::unary_function<T1,T2>
{
__host__ __device__
T2 operator()(T1 x)
{
T2 ret = abs(abs(thrust::get<0>(x))-(thrust::get<1>(x)));
return ret*ret;
}
};
*/
//=========================
/*
template<typename IN,typename OUT>
void transform_4in_1out(float * BINNED_POINTS_X, float * BINNED_POINTS_Y, int BINNED_POINTS, IN
* POINT_VALUE, float2 * POSITIONS,
int kb_table_size, float kb_table_scale, float * KBTABLE, OUT * derr, int N){
thrust::device_ptr<float> d_binned_points_x(BINNED_POINTS_X);
thrust::device_ptr<float> d_binned_points_y(BINNED_POINTS_Y);
thrust::device_ptr<int> d_binned_points(BINNED_POINTS);
thrust::device_ptr<float> d_value(POINT_VALUE);
thrust::device_ptr<float> d_positions(POSITIONS);
}
// transform_3in_2out(d_G,d_DG,d_a, (float ) tau, &der,&d2er,n);
// transform_3in_1out(d_G,d_DG,d_a, (float ) tau, &der,n);
template<typename IN,typename OUT>
// void transform_3in_2out(IN * G, IN * dG, float * F, float tau, OUT * derr, OUT * d2err, int N){
void transform_3in_2out(IN * G, IN * dG, float * F, float tau, OUT * derr, OUT * d2err, int N){
thrust::device_ptr<IN> d_G(G);
thrust::device_ptr<IN> d_dG(dG);
thrust::device_ptr<float> d_F(F);
thrust::tuple<OUT,OUT> init;
thrust::tuple<OUT,OUT> out = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(d_G, d_dG, d_F)),
thrust::make_zip_iterator(thrust::make_tuple(d_G, d_dG, d_F))+N,
DIR<IN,OUT>(tau),
init,
TUPLE_PLUS<thrust::tuple<OUT,OUT> >());
*derr = thrust::get<0>(out)*2;
*d2err = thrust::get<1>(out)*2;
}
*/
//--------------------------------
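// grid_points_cuda_mex_interleaved_kernel1: one thread block per bin, with three gridding
// strategies chosen per bin:
// 1) very small bins, or more points than fit in shared memory: the whole block cooperates on
//    one output pixel at a time and combines partial sums with a shared-memory tree reduction;
// 2) large bins: all points of the bin are cached in shared memory and each thread owns
//    whole output pixels;
// 3) small bins with few points: points are cached and sub-groups of 4 threads share each
//    output pixel, reducing their partial sums in shared memory.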
__global__ void grid_points_cuda_mex_interleaved_kernel1(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
int nbins,
int kb_table_size,
float kb_table_scale, const float * kb_table,
cusp::complex<float> * grid_value){
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
int i = blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
// sum_points<<<1,BLOCKSIZE>>> (point_value,binned_points,binned_points_x,binned_points_y,idx,points_per_bin[idx],x,y,kb_table_size,kb_table_scale, value);
// cusp::complex<float> value[1];
//grid_value[y*grid_size.x+x]=0;
// sum_points<<<1,BLOCKSIZE>>> (point_value,binned_points,binned_points_x,binned_points_y,idx,ppb,x,y,kb_table_size,kb_table_scale,grid_value+y*grid_size.x+x);
sum_t[tid] = 0;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
//typedef cub::BlockReduce<cusp::complex<float>, 128> BlockReduce;
// Allocate shared memory for BlockReduce
//__shared__ typename BlockReduce::TempStorage temp_storage;
// grid_value[y*grid_size.x+x]= BlockReduce(temp_storage).Sum(thread_data);
// for(int item=0; item<ITEMS_PER_THREAD; ++item)
// data[item] = unaryOp(data[item]);
sum_t[tid] = 0;
// loop through points
for(int j = tid;j<ppb;j+=blockDim.x){
sum_t[tid] += point_value[binned_points[idx+j]]*kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],
binned_points_y[idx+j]),
kb_table_size,kb_table_scale,kb_table );
}
__syncthreads(); // all partial sums must be visible before the tree reduction below
// Do a reduce in shared memory
for(unsigned int j=1; j < blockDim.x; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if(tid == 0){
grid_value[y*grid_size.x+x] = sum_t[0];
}
}
}
// large dimensions
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
/* Lets try to load all points to shared memory */
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale, kb_table);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] = my_sum;
}
}else{ //small dimension and few points
/* Lets try to load things to shared memory */
const int ppb = points_per_bin[i];
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,kb_table);
sum_t[tid] += point_value_cache[j]*w;
}
/* Do a reduce in shared memory */
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
}
void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y,
const cusp::complex<float> * point_value, int npoints,
uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points, const int * binned_points_idx, const int * bin_location,
const float * binned_points_x, const float * binned_points_y,
int nbins,
const float * kb_table,
int kb_table_size,
float kb_table_scale,
cusp::complex<float> * grid_value){
cudaMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y);
size_t offset;
cudaBindTexture(&offset,texRef, kb_table, sizeof(float)*kb_table_size);
if(offset != 0){
// printf("Error: Texture offset different than zero. Table not allocated with cudaMalloc!%d\n");
return;
}
int grid = nbins;
int block_size = BLOCKSIZE;
clock_t t_i = clock();
grid_points_cuda_mex_interleaved_kernel1<<<grid,block_size>>>( point_pos_x, point_pos_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale, kb_table, grid_value);
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
clock_t t_e = clock();
error_handle();
// printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC);
}
void compare_to_gold(float * gridded, float * gold_gridded, uint2 grid_size){
for(int i =0;i<grid_size.x*grid_size.y;i++){
if(fabs(gridded[i]-gold_gridded[i])/gridded[i] > 1e-5 &&
fabs(gridded[i]-gold_gridded[i]) > 1e-7){
printf("cuda[%d] = %e gold[%d] = %e\n",i,gridded[i],i,gold_gridded[i]);
exit(1);
}
}
}
//---------
#define SX prhs[0]
#define SY prhs[1]
#define SV prhs[2]
#define GRID_DIM prhs[3]
#define SPB prhs[4]
#define BIN_DIM_X prhs[5]
#define BIN_DIM_Y prhs[6]
#define SIB prhs[7]
#define BSO prhs[8]
#define BL prhs[9]
#define BPX prhs[10]
#define BPY prhs[11]
#define KLUT prhs[12]
#define KLUTS prhs[13]
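/* Hypothetical MATLAB call matching the prhs layout above (the actual name depends on how
   this MEX file is compiled):
   [grid, gold_grid] = gridding_mex(sx, sy, sv, grid_dim, samples_per_bin, ...
                                    bin_dim_x, bin_dim_y, samples_in_bin, bin_start_offset, ...
                                    bin_location, bin_points_x, bin_points_y, kb_lut, kb_lut_scale);
   The second output is optional and triggers the gold gridding path (nlhs == 2). */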
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mxGPUArray const *samples_x;
mxGPUArray const *samples_y;
mxGPUArray const *samples_values;
mxGPUArray const *samples_per_bin;
mxGPUArray const *bin_dimensions_x;
mxGPUArray const *bin_dimensions_y;
mxGPUArray const *samples_in_bin;
mxGPUArray const *bin_start_offset;
mxGPUArray const *bin_location;
mxGPUArray const *bin_points_x;
mxGPUArray const *bin_points_y;
mxGPUArray const *kernel_lookup_table;
//int *grid_dim =(int *) mxGetPr(GRID_DIM);
float kernel_lookup_table_scale = mxGetScalar(KLUTS);
int *grid_dim0=( int *) (mxGetData(GRID_DIM));
mwSize *grid_dim=(mwSize *)grid_dim0;
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1]));
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1]));
// OUTPUT
mxGPUArray *grid_values, *gold_grid_values;
samples_x = mxGPUCreateFromMxArray(SX);
samples_y = mxGPUCreateFromMxArray(SY);
samples_values = mxGPUCreateFromMxArray(SV);
samples_per_bin = mxGPUCreateFromMxArray(SPB);
bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X);
bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y);
samples_in_bin = mxGPUCreateFromMxArray(SIB);
bin_start_offset = mxGPUCreateFromMxArray(BSO);
bin_location = mxGPUCreateFromMxArray(BL);
bin_points_x = mxGPUCreateFromMxArray(BPX);
bin_points_y = mxGPUCreateFromMxArray(BPY);
kernel_lookup_table= mxGPUCreateFromMxArray(KLUT);
int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x));
int npoints = (int)(mxGPUGetNumberOfElements(samples_x));
int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table));
mwSize ndim= 2;
// mwSize *grid_dim1[]={(mwSize grid_dim[0]), }
// output:
// float2 * grid_values;
// float2 * gold_grid_values;
// plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,);
//grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
// now get the pointer or whatever it is
const float *d_samples_x = (const float *)(mxGPUGetDataReadOnly(samples_x));
const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y));
// float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values));
const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values));
const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin));
const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x));
const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y));
const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin));
const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset));
const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location));
const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x));
const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y));
const float * d_kernel_lookup_table = (const float *)(mxGPUGetDataReadOnly(kernel_lookup_table));
const uint2 grid_size = {grid_dim[0],grid_dim[1]};
//float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values));
cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values));
// mexErrMsgTxt("gpuArray 1");
grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y,
d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_points_x, d_bin_points_y,
nbins, d_kernel_lookup_table,
kernel_lookup_table_size,
kernel_lookup_table_scale,
d_grid_values);
//mexErrMsgTxt("gpuArray 2");
plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values);
if(nlhs == 2){
//gold_grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
gold_grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
//gold_grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
//float2 * d_gold_grid_values = (float2 *)(mxGPUGetData(gold_grid_values));
cusp::complex<float> * d_gold_grid_values = ( cusp::complex<float> *)(mxGPUGetData(gold_grid_values));
grid_points_gold (d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_dimensions_x, d_bin_dimensions_y,nbins, d_kernel_lookup_table, kernel_lookup_table_size,
kernel_lookup_table_scale,d_gold_grid_values);
plhs[1] = mxGPUCreateMxArrayOnGPU(gold_grid_values);
mxGPUDestroyGPUArray( gold_grid_values);
}
mxGPUDestroyGPUArray( samples_x);
mxGPUDestroyGPUArray( samples_y);
mxGPUDestroyGPUArray( samples_values);
mxGPUDestroyGPUArray( samples_per_bin);
mxGPUDestroyGPUArray( bin_dimensions_x);
mxGPUDestroyGPUArray( bin_dimensions_y);
mxGPUDestroyGPUArray( samples_in_bin);
mxGPUDestroyGPUArray( kernel_lookup_table);
mxGPUDestroyGPUArray( bin_start_offset);
mxGPUDestroyGPUArray( bin_location);
mxGPUDestroyGPUArray( bin_points_x);
mxGPUDestroyGPUArray( bin_points_y);
mxGPUDestroyGPUArray( grid_values);
}
|
905f305ea254e9e26a023a6bf27ef567e741d6fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
#include "cuPrintf_hip.cuh"
#include "cuPrintf.hip"
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, const float* radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
xyz1+=n*3;
xyz2+=m*3;
idx+=m*nsample;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
points+=n*c;
idx+=m*nsample;
out+=m*nsample*c;
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
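// Note: grad_points is accumulated with a plain += below. That is only safe because this
// naive kernel, like the others in this file, contains no thread indexing and walks the whole
// batch itself; launching it with more than one thread would repeat the work and race on
// grad_points (a parallel version would need an index partition plus atomicAdd).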
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
grad_points[ii*c+l] += grad_out[j*nsample*c+k*c+l];
}
}
}
idx+=m*nsample;
grad_out+=m*nsample*c;
grad_points+=n*c;
}
}
| 905f305ea254e9e26a023a6bf27ef567e741d6fa.cu | #include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
#include "cuPrintf.cuh"
#include "cuPrintf.cu"
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, const float* radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
xyz1+=n*3;
xyz2+=m*3;
idx+=m*nsample;
}
}
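// Minimal host-side driver sketch (not part of the original op) showing how these naive
// kernels are meant to be launched: each kernel loops over all b batches internally and uses
// no thread indexing, so a single-thread launch is assumed; buffer names are illustrative.
static void example_query_ball_launch(int b, int n, int m, int nsample,
                                      const float *d_radius, // device pointer, 1 float
                                      const float *d_xyz1,   // device pointer, b*n*3 floats
                                      const float *d_xyz2,   // device pointer, b*m*3 floats
                                      int *d_idx) {          // device pointer, b*m*nsample ints
    query_ball_point_gpu<<<1, 1>>>(b, n, m, d_radius, nsample, d_xyz1, d_xyz2, d_idx);
    cudaDeviceSynchronize(); // wait for the fully serial kernel to finish
}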
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
points+=n*c;
idx+=m*nsample;
out+=m*nsample*c;
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
for (int i=0;i<b;++i) {
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
grad_points[ii*c+l] += grad_out[j*nsample*c+k*c+l];
}
}
}
idx+=m*nsample;
grad_out+=m*nsample*c;
grad_points+=n*c;
}
}
|
a33593b93c9b70644bb98df014a0fd31aefeafa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header2.cuh"
#define MAX_THREADS 1024
#define MAX_BLOCKS 30
#define MAX_PERMS 5041
#define CUDA_RUN(x_) {hipError_t cudaStatus = x_; if (cudaStatus != hipSuccess) {fprintf(stderr, "Error %d - %s\n", cudaStatus, hipGetErrorString(cudaStatus)); goto Error;}}
#define SAFE(x_) {if((x_) == NULL) printf("out of memory. %d\n", __LINE__);}
__device__ __shared__ int32_t shared_cost;
// Quick implementation of factorial of a number
__host__ unsigned long long factorial(int32_t n) {
int c;
unsigned long long result = 1;
for (c = 1; c <= n; c++)
result = result * c;
return result;
}
int main(int argc, char* argv[]) {
// At least two arguments must be specified: the name of the program itself and the number of nodes to compute.
if (argc < 2) return 0;
int size8 = sizeof(int8_t);
int size32 = sizeof(int32_t);
unsigned long long total_permutations, thread_perms, num_blocks = 1, num_threads, num_kernels = 1;
float time_passed;
hipEvent_t startEvent, stopEvent;
/* Host variables */
int8_t* nodes, * shortestPath, * graphWeights, * choices;
int32_t size = atoi(argv[1]), * cost;
int8_t selected_K = 0;
unsigned long long threads_per_kernel;
/* Device variables */
int8_t* dev_nodes_ids, * dev_shortestPath, * dev_graphWeights, * dev_choices;
int32_t* dev_cost, * dev_size;
int8_t* dev_selected_K;
unsigned long long* dev_threads_per_kernel;
total_permutations = factorial(size - 1); // Number of combinations to be computed is (N-1)! where is N is the number of nodes.
printf("factorial(%d): %llu\n", size - 1, total_permutations);
// Calculation of what is the max number of permutations per thread possible without exceeding MAX_PERMS
for (selected_K = 1; selected_K < size - 2; selected_K++) {
thread_perms = factorial(size - 1 - selected_K);
if (thread_perms < MAX_PERMS) break;
}
// Calculation of how many threads do we need based on the permutations per thread and the total number of permutations to be processed.
num_threads = total_permutations / thread_perms;
// If threads exceed the maximum, they will be equally distributed in different blocks
int k;
while (num_threads > MAX_THREADS) {
k = 2;
while (num_threads % k != 0) k++;
num_threads /= k;
num_blocks *= k;
}
// If blocks exceed the maximum, they will be equally distributed in different kernels
while (num_blocks > MAX_BLOCKS) {
k = 2;
while (num_blocks % k != 0) k++;
num_blocks /= k;
num_kernels *= k;
}
threads_per_kernel = num_blocks * num_threads;
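// Worked example (hypothetical input): for size = 10 the total is 9! = 362880 permutations;
// the loop above picks selected_K = 2 because 7! = 5040 < MAX_PERMS, so each thread enumerates
// 5040 permutations and 362880 / 5040 = 72 threads are needed -- that fits in one block and
// one kernel, giving threads_per_kernel = 72.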
// Print problem configuration
printf("K selected: %d\n", selected_K);
printf("num_threads %llu thread_perms %llu num_blocks %llu num_kernels %llu threads_per_kernel %llu\n", num_threads, thread_perms, num_blocks, num_kernels, threads_per_kernel);
dim3 block_dim(num_threads, 1, 1);
dim3 grid_dim(num_blocks, 1, 1);
// Memory allocations wrapped in the SAFE macro in case one of them fails because memory is not available.
SAFE(nodes = (int8_t*)malloc(size * size8));
SAFE(shortestPath = (int8_t*)calloc(num_blocks * size, size8));
SAFE(graphWeights = (int8_t*)malloc(size * size8 * size));
SAFE(cost = (int32_t*)calloc(num_blocks * size, size32));
SAFE(choices = (int8_t*)malloc(threads_per_kernel * size * size8));
// Device memory allocation for data in the device (GPU)
CUDA_RUN(hipMalloc((void**)&dev_nodes_ids, size * size8));
CUDA_RUN(hipMalloc((void**)&dev_shortestPath, size * size8 * num_blocks));
CUDA_RUN(hipMalloc((void**)&dev_graphWeights, size * size8 * size));
CUDA_RUN(hipMalloc((void**)&dev_cost, num_blocks * size32));
CUDA_RUN(hipMalloc((void**)&dev_size, size32));
CUDA_RUN(hipMalloc((void**)&dev_selected_K, size8));
CUDA_RUN(hipMalloc((void**)&dev_choices, threads_per_kernel * size * size8));
CUDA_RUN(hipMalloc((void**)&dev_threads_per_kernel, sizeof(unsigned long long)));
srand(time(NULL));
initialize(nodes, graphWeights, size);
// Transfer of the data from host (CPU) to the device (GPU)
CUDA_RUN(hipMemcpy(dev_nodes_ids, nodes, size * size8, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_shortestPath, shortestPath, size * size8 * num_blocks, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_graphWeights, graphWeights, size * size8 * size, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_size, &size, size32, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_selected_K, &selected_K, size8, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_choices, choices, threads_per_kernel * size * size8, hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_threads_per_kernel, &threads_per_kernel, sizeof(unsigned long long), hipMemcpyHostToDevice));
CUDA_RUN(hipMemcpy(dev_cost, cost, num_blocks * size32, hipMemcpyHostToDevice));
// Creation of time events to measure times
CUDA_RUN(hipEventCreate(&startEvent));
CUDA_RUN(hipEventCreate(&stopEvent));
CUDA_RUN(hipEventRecord(startEvent, 0));
// Kernels launching one by one
float percentage;
for (int i = 0; i < num_kernels; i++) {
// Assignment of combinations to each thread
find_permutations_for_threads << < 1, 1 >> > (dev_nodes_ids, dev_selected_K, dev_choices, dev_size, dev_threads_per_kernel); // dev_nodes_ids is the declared device buffer (dev_city_ids does not exist)
CUDA_RUN(hipGetLastError());
CUDA_RUN(hipDeviceSynchronize());
// Total cost calculation of each thread paths
combinations_kernel << < grid_dim, block_dim >> > (dev_choices, dev_selected_K, dev_shortestPath, dev_graphWeights, dev_cost, dev_size);
CUDA_RUN(hipGetLastError());
CUDA_RUN(hipDeviceSynchronize());
// Printing progress out in the console
percentage = (100. / (float)num_kernels * (float)(i + 1));
printf("\rProgress : ");
for (int j = 0; j < 10; j++) {
if ((percentage / 10) > j) printf("#"); // same test as before, without the division by j == 0
else printf(" ");
}
printf(" [%.2f%%]", percentage);
fflush(stdout);
}
CUDA_RUN(hipEventRecord(stopEvent, 0));
CUDA_RUN(hipEventSynchronize(stopEvent));
CUDA_RUN(hipEventElapsedTime(&time_passed, startEvent, stopEvent));
CUDA_RUN(hipMemcpy(shortestPath, dev_shortestPath, num_blocks * size * size8, hipMemcpyDeviceToHost));
CUDA_RUN(hipMemcpy(cost, dev_cost, num_blocks * size32, hipMemcpyDeviceToHost));
printf("\nTime passed: %3.1f ms \n", time_passed);
print_Graph(graphWeights, size);
// Search of the block with lowest cost path
{
int32_t min = cost[0];
int8_t index = 0;
for (int i = 1; i < num_blocks; i++) {
if (cost[i] < min) {
min = cost[i];
index = i;
}
}
printf("Shortest path found on block #%d:\n", index + 1);
print_ShortestPath(&shortestPath[index * size], min, size);
}
Error: // In case of error, free all allocated memory
free(nodes);
free(shortestPath);
free(graphWeights);
free(cost);
free(choices);
hipFree(dev_nodes_ids);
hipFree(dev_shortestPath);
hipFree(dev_graphWeights);
hipFree(dev_cost);
hipFree(dev_size);
hipFree(dev_selected_K);
hipFree(dev_choices);
hipFree(dev_threads_per_kernel);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
getchar();
return 0;
}
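// find_permutations_for_threads runs on a single thread and hands every worker thread its
// starting tour: it copies the current ordering into choices, then reverses the tail beyond
// position k (turning it into the last ordering of that suffix) so that the following
// next_permutation call advances the fixed prefix by one step and resets the tail to ascending
// order -- i.e. it jumps over exactly the (size-1-k)! tail orderings that the receiving thread
// will enumerate itself inside combinations_kernel.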
__global__
void find_permutations_for_threads(int8_t* city_ids, int8_t* k, int8_t* choices, int32_t* size, unsigned long long* threads_per_kernel) {
int32_t length = *size;
int8_t index = 1;
unsigned long long count = 0;
for (count = 0; count < *threads_per_kernel; count++) {
for (int i = 0; i < length; i++) {
choices[i + count * length] = city_ids[i];
}
reverse(city_ids + *k + index, city_ids + length);
next_permutation(city_ids + index, city_ids + length);
}
}
__global__
void combinations_kernel(int8_t* choices, int8_t* k, int8_t* shortestPath, int8_t* graphWeights, int32_t* cost, int32_t* size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
int32_t length = *size;
int8_t index = 1;
/* local variables */
int8_t* _path, * _shortestPath;
int32_t _tcost;
SAFE(_path = (int8_t*)malloc(length * sizeof(int8_t)));
SAFE(_shortestPath = (int8_t*)malloc(length * sizeof(int8_t)));
_tcost = length * 100;
memcpy(_path, choices + tid * length, length * sizeof(int8_t));
memcpy(_shortestPath, shortestPath, length * sizeof(int8_t));
if (threadIdx.x == 0) {
if (cost[blockIdx.x] == 0) cost[blockIdx.x] = length * 100;
shared_cost = length * 100;
}
__syncthreads();
do {
coppy_array(_path, _shortestPath, &_tcost, graphWeights, length, tid);
} while (next_permutation(_path + *k + index, _path + length));
if (_tcost == shared_cost) {
atomicMin(&cost[blockIdx.x], _tcost);
if (cost[blockIdx.x] == _tcost) {
memcpy(shortestPath + blockIdx.x * length, _shortestPath, length * sizeof(int8_t));
}
}
free(_path);
free(_shortestPath);
}
__host__
void initialize(int8_t* city_ids, int8_t* graphWeights, int32_t size) {
for (int i = 0; i < size; i++) {
city_ids[i] = i;
for (int j = 0; j < size; j++) {
if (i == j)
graphWeights[i * size + j] = 0;
else
graphWeights[i * size + j] = 99;
}
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size;) {
int next = 1; // (rand() % 2) + 1;
int road = rand() % 100 + 1;
if (i == j) {
j += next;
continue;
}
graphWeights[i * size + j] = road;
printf("%d\t", graphWeights[i * size + j]);
j += next;
}
}
for (int i = size - 1; i >= 0; i--) {
graphWeights[((i + 1) % size) * size + i] = 1;
}
}
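// Note on initialize() above: besides the random weights in [1,100], the final loop forces the
// edge from city (i+1)%size to city i to weight 1, so a tour that visits the cities in
// descending order always exists with total cost == size -- a handy sanity bound for the
// reported shortest path.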
__host__
void print_Graph(int8_t* graphWeights, int32_t size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
printf("%d\t", graphWeights[i * size + j]);
}
printf("\n");
}
}
__host__
void print_ShortestPath(int8_t* shortestPath, int32_t cost, int32_t size) {
int i;
if (cost == (size * 100)) printf("no possible path found.\n");
else {
for (i = 0; i < size; i++) {
printf("%d\t", shortestPath[i]);
}
printf("\nCost: %d\n", cost);
}
}
__device__
void swap(int8_t* x, int8_t* y) { int8_t tmp = *x; *x = *y; *y = tmp; }
__device__
void reverse(int8_t* first, int8_t* last) { while ((first != last) && (first != --last)) swap(first++, last); }
__device__
void coppy_array(int8_t* path, int8_t* shortestPath, int32_t* tcost, int8_t* weights, int8_t length, int tid) {
int32_t sum = 0;
for (int32_t i = 0; i < length; i++) {
int8_t val = weights[path[i] * length + path[(i + 1) % length]];
if (val == -1) return;
sum += val;
}
if (sum == 0) return;
atomicMin(&shared_cost, sum);
if (shared_cost == sum) {
*tcost = sum;
memcpy(shortestPath, path, length * sizeof(int8_t)); // path holds int8_t elements; sizeof(int32_t) overran both buffers
}
}
__device__
bool next_permutation(int8_t* first, int8_t* last) {
if (first == last) return false;
int8_t* i = first;
++i;
if (i == last) return false;
i = last;
--i;
for (;;) {
int8_t* ii = i--;
if (*i < *ii) {
int8_t* j = last;
while (!(*i < *--j));
swap(i, j);
reverse(ii, last);
return true;
}
if (i == first) {
reverse(first, last);
return false;
}
}
} | a33593b93c9b70644bb98df014a0fd31aefeafa6.cu | #include "header2.cuh"
#define MAX_THREADS 1024
#define MAX_BLOCKS 30
#define MAX_PERMS 5041
#define CUDA_RUN(x_) {cudaError_t cudaStatus = x_; if (cudaStatus != cudaSuccess) {fprintf(stderr, "Error %d - %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); goto Error;}}
#define SAFE(x_) {if((x_) == NULL) printf("out of memory. %d\n", __LINE__);}
__device__ __shared__ int32_t shared_cost;
// Quick implementation of factorial of a number
__host__ unsigned long long factorial(int32_t n) {
int c;
unsigned long long result = 1;
for (c = 1; c <= n; c++)
result = result * c;
return result;
}
int main(int argc, char* argv[]) {
// At least two arguments must be specified: the name of the program itself and the number of nodes to compute.
if (argc < 2) return 0;
int size8 = sizeof(int8_t);
int size32 = sizeof(int32_t);
unsigned long long total_permutations, thread_perms, num_blocks = 1, num_threads, num_kernels = 1;
float time_passed;
cudaEvent_t startEvent, stopEvent;
/* Host variables */
int8_t* nodes, * shortestPath, * graphWeights, * choices;
int32_t size = atoi(argv[1]), * cost;
int8_t selected_K = 0;
unsigned long long threads_per_kernel;
/* Device variables */
int8_t* dev_nodes_ids, * dev_shortestPath, * dev_graphWeights, * dev_choices;
int32_t* dev_cost, * dev_size;
int8_t* dev_selected_K;
unsigned long long* dev_threads_per_kernel;
total_permutations = factorial(size - 1); // Number of combinations to be computed is (N-1)! where is N is the number of nodes.
printf("factorial(%d): %llu\n", size - 1, total_permutations);
// Calculation of what is the max number of permutations per thread possible without exceeding MAX_PERMS
for (selected_K = 1; selected_K < size - 2; selected_K++) {
thread_perms = factorial(size - 1 - selected_K);
if (thread_perms < MAX_PERMS) break;
}
// Calculation of how many threads do we need based on the permutations per thread and the total number of permutations to be processed.
num_threads = total_permutations / thread_perms;
// If threads exceed the maximum, they will be equally distributed in different blocks
int k;
while (num_threads > MAX_THREADS) {
k = 2;
while (num_threads % k != 0) k++;
num_threads /= k;
num_blocks *= k;
}
// If blocks exceed the maximum, they will be equally distributed in different kernels
while (num_blocks > MAX_BLOCKS) {
k = 2;
while (num_blocks % k != 0) k++;
num_blocks /= k;
num_kernels *= k;
}
threads_per_kernel = num_blocks * num_threads;
// Print problem configuration
printf("K selected: %d\n", selected_K);
printf("num_threads %llu thread_perms %llu num_blocks %llu num_kernels %llu threads_per_kernel %llu\n", num_threads, thread_perms, num_blocks, num_kernels, threads_per_kernel);
dim3 block_dim(num_threads, 1, 1);
dim3 grid_dim(num_blocks, 1, 1);
// Memory allocations wrapped in the SAFE macro in case one of them fails because memory is not available.
SAFE(nodes = (int8_t*)malloc(size * size8));
SAFE(shortestPath = (int8_t*)calloc(num_blocks * size, size8));
SAFE(graphWeights = (int8_t*)malloc(size * size8 * size));
SAFE(cost = (int32_t*)calloc(num_blocks * size, size32));
SAFE(choices = (int8_t*)malloc(threads_per_kernel * size * size8));
// Device memory allocation for data in the device (GPU)
CUDA_RUN(cudaMalloc((void**)&dev_nodes_ids, size * size8));
CUDA_RUN(cudaMalloc((void**)&dev_shortestPath, size * size8 * num_blocks));
CUDA_RUN(cudaMalloc((void**)&dev_graphWeights, size * size8 * size));
CUDA_RUN(cudaMalloc((void**)&dev_cost, num_blocks * size32));
CUDA_RUN(cudaMalloc((void**)&dev_size, size32));
CUDA_RUN(cudaMalloc((void**)&dev_selected_K, size8));
CUDA_RUN(cudaMalloc((void**)&dev_choices, threads_per_kernel * size * size8));
CUDA_RUN(cudaMalloc((void**)&dev_threads_per_kernel, sizeof(unsigned long long)));
srand(time(NULL));
initialize(nodes, graphWeights, size);
// Transfer of the data from host (CPU) to the device (GPU)
CUDA_RUN(cudaMemcpy(dev_nodes_ids, nodes, size * size8, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_shortestPath, shortestPath, size * size8 * num_blocks, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_graphWeights, graphWeights, size * size8 * size, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_size, &size, size32, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_selected_K, &selected_K, size8, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_choices, choices, threads_per_kernel * size * size8, cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_threads_per_kernel, &threads_per_kernel, sizeof(unsigned long long), cudaMemcpyHostToDevice));
CUDA_RUN(cudaMemcpy(dev_cost, cost, num_blocks * size32, cudaMemcpyHostToDevice));
// Creation of time events to measure times
CUDA_RUN(cudaEventCreate(&startEvent));
CUDA_RUN(cudaEventCreate(&stopEvent));
CUDA_RUN(cudaEventRecord(startEvent, 0));
// Kernels launching one by one
float percentage;
for (int i = 0; i < num_kernels; i++) {
// Assignment of combinations to each thread
find_permutations_for_threads << < 1, 1 >> > (dev_nodes_ids, dev_selected_K, dev_choices, dev_size, dev_threads_per_kernel); // dev_nodes_ids is the declared device buffer (dev_city_ids does not exist)
CUDA_RUN(cudaGetLastError());
CUDA_RUN(cudaDeviceSynchronize());
// Total cost calculation of each thread paths
combinations_kernel << < grid_dim, block_dim >> > (dev_choices, dev_selected_K, dev_shortestPath, dev_graphWeights, dev_cost, dev_size);
CUDA_RUN(cudaGetLastError());
CUDA_RUN(cudaDeviceSynchronize());
// Printing progress out in the console
percentage = (100. / (float)num_kernels * (float)(i + 1));
printf("\rProgress : ");
for (int j = 0; j < 10; j++) {
if ((percentage / 10) > j) printf("#"); // same test as before, without the division by j == 0
else printf(" ");
}
printf(" [%.2f%%]", percentage);
fflush(stdout);
}
CUDA_RUN(cudaEventRecord(stopEvent, 0));
CUDA_RUN(cudaEventSynchronize(stopEvent));
CUDA_RUN(cudaEventElapsedTime(&time_passed, startEvent, stopEvent));
CUDA_RUN(cudaMemcpy(shortestPath, dev_shortestPath, num_blocks * size * size8, cudaMemcpyDeviceToHost));
CUDA_RUN(cudaMemcpy(cost, dev_cost, num_blocks * size32, cudaMemcpyDeviceToHost));
printf("\nTime passed: %3.1f ms \n", time_passed);
print_Graph(graphWeights, size);
// Search of the block with lowest cost path
{
int32_t min = cost[0];
int8_t index = 0;
for (int i = 1; i < num_blocks; i++) {
if (cost[i] < min) {
min = cost[i];
index = i;
}
}
printf("Shortest path found on block #%d:\n", index + 1);
print_ShortestPath(&shortestPath[index * size], min, size);
}
Error: // In case of error, free all allocated memory
free(nodes);
free(shortestPath);
free(graphWeights);
free(cost);
free(choices);
cudaFree(dev_nodes_ids);
cudaFree(dev_shortestPath);
cudaFree(dev_graphWeights);
cudaFree(dev_cost);
cudaFree(dev_size);
cudaFree(dev_selected_K);
cudaFree(dev_choices);
cudaFree(dev_threads_per_kernel);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
getchar();
return 0;
}
__global__
void find_permutations_for_threads(int8_t* city_ids, int8_t* k, int8_t* choices, int32_t* size, unsigned long long* threads_per_kernel) {
int32_t length = *size;
int8_t index = 1;
unsigned long long count = 0;
for (count = 0; count < *threads_per_kernel; count++) {
for (int i = 0; i < length; i++) {
choices[i + count * length] = city_ids[i];
}
reverse(city_ids + *k + index, city_ids + length);
next_permutation(city_ids + index, city_ids + length);
}
}
__global__
void combinations_kernel(int8_t* choices, int8_t* k, int8_t* shortestPath, int8_t* graphWeights, int32_t* cost, int32_t* size) {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
int32_t length = *size;
int8_t index = 1;
/* local variables */
int8_t* _path, * _shortestPath;
int32_t _tcost;
SAFE(_path = (int8_t*)malloc(length * sizeof(int8_t)));
SAFE(_shortestPath = (int8_t*)malloc(length * sizeof(int8_t)));
_tcost = length * 100;
memcpy(_path, choices + tid * length, length * sizeof(int8_t));
memcpy(_shortestPath, shortestPath, length * sizeof(int8_t));
if (threadIdx.x == 0) {
if (cost[blockIdx.x] == 0) cost[blockIdx.x] = length * 100;
shared_cost = length * 100;
}
__syncthreads();
do {
coppy_array(_path, _shortestPath, &_tcost, graphWeights, length, tid);
} while (next_permutation(_path + *k + index, _path + length));
if (_tcost == shared_cost) {
atomicMin(&cost[blockIdx.x], _tcost);
if (cost[blockIdx.x] == _tcost) {
memcpy(shortestPath + blockIdx.x * length, _shortestPath, length * sizeof(int8_t));
}
}
free(_path);
free(_shortestPath);
}
__host__
void initialize(int8_t* city_ids, int8_t* graphWeights, int32_t size) {
for (int i = 0; i < size; i++) {
city_ids[i] = i;
for (int j = 0; j < size; j++) {
if (i == j)
graphWeights[i * size + j] = 0;
else
graphWeights[i * size + j] = 99;
}
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size;) {
int next = 1; // (rand() % 2) + 1;
int road = rand() % 100 + 1;
if (i == j) {
j += next;
continue;
}
graphWeights[i * size + j] = road;
printf("%d\t", graphWeights[i * size + j]);
j += next;
}
}
for (int i = size - 1; i >= 0; i--) {
graphWeights[((i + 1) % size) * size + i] = 1;
}
}
__host__
void print_Graph(int8_t* graphWeights, int32_t size) {
int i, j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++) {
printf("%d\t", graphWeights[i * size + j]);
}
printf("\n");
}
}
__host__
void print_ShortestPath(int8_t* shortestPath, int32_t cost, int32_t size) {
int i;
if (cost == (size * 100)) printf("no possible path found.\n");
else {
for (i = 0; i < size; i++) {
printf("%d\t", shortestPath[i]);
}
printf("\nCost: %d\n", cost);
}
}
__device__
void swap(int8_t* x, int8_t* y) { int8_t tmp = *x; *x = *y; *y = tmp; }
__device__
void reverse(int8_t* first, int8_t* last) { while ((first != last) && (first != --last)) swap(first++, last); }
__device__
void coppy_array(int8_t* path, int8_t* shortestPath, int32_t* tcost, int8_t* weights, int8_t length, int tid) {
int32_t sum = 0;
for (int32_t i = 0; i < length; i++) {
int8_t val = weights[path[i] * length + path[(i + 1) % length]];
if (val == -1) return;
sum += val;
}
if (sum == 0) return;
atomicMin(&shared_cost, sum);
if (shared_cost == sum) {
*tcost = sum;
memcpy(shortestPath, path, length * sizeof(int8_t)); // path holds int8_t elements; sizeof(int32_t) overran both buffers
}
}
__device__
bool next_permutation(int8_t* first, int8_t* last) {
if (first == last) return false;
int8_t* i = first;
++i;
if (i == last) return false;
i = last;
--i;
for (;;) {
int8_t* ii = i--;
if (*i < *ii) {
int8_t* j = last;
while (!(*i < *--j));
swap(i, j);
reverse(ii, last);
return true;
}
if (i == first) {
reverse(first, last);
return false;
}
}
} |
786c55c11097bc6fb448724426ae8ed5076f9726.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime_api.h>
#include <sys/time.h>
#include <algorithm>
#include <fstream>
#include <functional>
#include <random>
#include "HugeCTR/include/data_parser.hpp"
#include "HugeCTR/include/data_reader.hpp"
#include "HugeCTR/include/embedding.hpp"
#include "gtest/gtest.h"
#include "roctracer/roctx.h"
#include "utest/embedding/embedding_test_utils.hpp"
#include "utest/embedding/sparse_embedding_hash_cpu.hpp"
#include "utest/test_utils.h"
using namespace HugeCTR;
using namespace embedding_test;
namespace {
//---------------------------------------------------------------------------------------
// global params for all testing
const int train_batch_num = 10; // cannot be more than 32
const int test_batch_num = 1;
const int train_batchsize = 1024;
const int test_batchsize = 1024;
const int slot_num = 26;
const int max_nnz_per_slot = 1;
const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample
const long long vocabulary_size = slot_num * 100;
const int embedding_vec_size = 128;
const int combiner = 0; // 0-sum, 1-mean
const long long label_dim = 1;
const long long dense_dim = 0;
typedef long long T;
const float scaler = 1.0f; // used in mixed precision training
const float lr = 0.01f;
// In order to not allocate the total size of hash table on each GPU, the users need to set the
// size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count,
// eg: 1.25x of that.
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparison
const int num_files = 1;
const Check_t CHK = Check_t::Sum; // Check_t::Sum
const char *train_file_list_name = "train_file_list.txt";
const char *test_file_list_name = "test_file_list.txt";
const char *prefix = "./data_reader_test_data/temp_dataset_";
#ifndef NCCl_A2A
const std::string plan_file(PROJECT_HOME_ + "utest/all2all_plan_dgx_{0,1,2,3,4,5,6,7}.json");
#else
const std::string plan_file = "";
#endif
const char *hash_table_file_name = "localized_hash_table.bin";
// std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as
// max_vocabulary_size_per_gpu
// CAUTION: must match vocabulary_size
// std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,
// 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; //
// for cretio dataset
std::vector<size_t> slot_sizes = {100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100}; // just for verify
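// The 26 entries above sum to 2600 = slot_num * 100 = vocabulary_size, satisfying the
// "must match vocabulary_size" requirement noted above.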
//-----------------------------------------------------------------------------------------
template <typename TypeEmbeddingComp>
void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer,
bool global_update) {
OptHyperParams<TypeEmbeddingComp> hyper_params;
const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, global_update,
scaler};
int numprocs = 1, pid = 0;
std::vector<std::vector<int>> vvgpu;
test::mpi_init();
#ifdef ENABLE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
#endif
// if there are multiple nodes, we assume each node has the same gpu device_list
for (int i = 0; i < numprocs; i++) {
vvgpu.push_back(device_list);
}
std::shared_ptr<DeviceMap> device_map(new DeviceMap(vvgpu, pid));
std::shared_ptr<GPUResourceGroup> gpu_resource_group(new GPUResourceGroup(device_map));
if (pid == 0) {
std::cout << "rank " << pid << " is generating data" << std::endl;
{
// re-generate the dataset files
std::ifstream file(train_file_list_name);
if (file.good()) {
std::remove(train_file_list_name);
}
}
{
// re-generate the dataset files
std::ifstream file(test_file_list_name);
if (file.good()) {
std::remove(test_file_list_name);
}
}
// data generation: key's corresponding slot_id=(key%slot_num)
if (slot_sizes.size() > 0) {
HugeCTR::data_generation_for_localized_test<T, CHK>(
train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes);
HugeCTR::data_generation_for_localized_test<T, CHK>(
test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes);
} else {
CK_THROW_(
Error_t::WrongInput,
"Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot");
}
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
std::cout << "This is rank: " << pid << std::endl;
#endif
// setup a data reader
const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num,
max_nnz_per_slot, slot_num};
std::vector<DataReaderSparseParam> params;
params.push_back(param);
std::unique_ptr<DataReader<T>> train_data_reader(
new DataReader<T>(train_file_list_name, train_batchsize, label_dim, dense_dim, CHK, params,
gpu_resource_group, num_chunk_threads));
// generate hashtable
if (pid == 0) {
std::cout << "Init hash table";
// init hash table file: <key, slot_id, value>
std::ofstream weight_stream(hash_table_file_name);
if (!weight_stream.is_open()) {
ERROR_MESSAGE_("Error: file not open for writing");
}
// UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id
UnifiedDataSimulator<float> fdata_sim(-0.1f, 0.1f); // for value
for (long long i = 0; i < vocabulary_size; i++) {
T key = (T)i;
// T key = ldata_sim.get_num();
// CAUTION: cannot set random keys here, because we need to ensure that:
// 1) we can find keys in the data file from this hash table
// 2) there are no repeated keys
weight_stream.write((char *)&key, sizeof(T));
T slot_id;
if (slot_sizes.size() == 0) {
// slot_id = key % slot_num; // CAUSION: need to dedicate the slot_id for each key for
// // correctness verification
CK_THROW_(Error_t::WrongInput,
"Must set slot_sizes since there is no hashtable in "
"LocalizedSlotSpasrseEmbeddingOneHot");
} else {
size_t offset = 0;
for (size_t j = 0; j < slot_sizes.size(); j++) {
if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) {
slot_id = (T)j;
break;
}
offset += slot_sizes[j];
}
}
weight_stream.write((char *)&slot_id, sizeof(T));
// float val = (float)i;
// float val = 0.1f;
float val = fdata_sim.get_num();
for (int j = 0; j < embedding_vec_size; j++) {
weight_stream.write((char *)&val, sizeof(float));
}
}
weight_stream.close();
std::cout << " Done" << std::endl;
}
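// With the uniform slot_sizes used here (26 slots of 100 keys), the offset scan above simply
// assigns key k to slot k / 100, and the dumped table covers every key 0..vocabulary_size-1
// exactly once.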
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
const SparseEmbeddingHashParams<TypeEmbeddingComp> train_embedding_params = {
train_batchsize, 0, slot_sizes, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
EmbeddingCreator::create_localized_sparse_embedding_one_hot(
train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
train_embedding_params, plan_file, gpu_resource_group));
{
// upload hash table to device
std::ifstream fs(hash_table_file_name);
embedding->upload_params_to_device(fs);
fs.close();
}
// for SparseEmbeddingCpu
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num,
label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, optimizer, lr,
train_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized, global_update,
scaler));
TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results();
TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results();
T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr();
float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr();
// for results check
std::unique_ptr<TypeEmbeddingComp[]> embedding_feature_from_gpu(
new TypeEmbeddingComp[train_batchsize * slot_num * embedding_vec_size]);
std::unique_ptr<TypeEmbeddingComp[]> wgrad_from_gpu(
new TypeEmbeddingComp[train_batchsize * slot_num * embedding_vec_size]);
std::unique_ptr<T> hash_table_key_from_gpu(new T[vocabulary_size]);
std::unique_ptr<float> hash_table_value_from_gpu(new float[vocabulary_size * embedding_vec_size]);
typedef struct TypeHashValue_ {
float data[embedding_vec_size];
} TypeHashValue;
for (int i = 0; i < train_batch_num; i++) {
printf("Rank%d: Round %d start training:\n", pid, i);
// call read a batch
printf("Rank%d: data_reader->read_a_batch_to_device()\n", pid);
train_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding->forward()\n", pid);
embedding->forward();
// check the result of forward
printf("Rank%d: embedding->get_forward_results()\n", pid);
embedding->get_forward_results(embedding_feature_from_gpu.get()); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu->forward()\n");
embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_EQ(true, compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu.get(),
embedding_feature_from_cpu));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU backward
printf("Rank%d: embedding->backward()\n", pid);
embedding->backward();
// check the result of backward
printf("Rank%d: embedding->get_backward_results()\n", pid);
embedding->get_backward_results(wgrad_from_gpu.get(), 0);
if (pid == 0) {
// CPU backward
printf("Rank0: embedding_cpu->backward()\n");
embedding_cpu->backward();
printf("Rank0: check backward results: GPU and CPU\n");
ASSERT_EQ(true, compare_wgrad(train_batchsize * slot_num * embedding_vec_size,
wgrad_from_gpu.get(), wgrad_from_cpu));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU update_params
printf("Rank%d: embedding->update_params()\n", pid);
embedding->update_params();
#if 1 // can not check update_params since one-hot has no hashtable // TODO: need to modify
// get_update_params_results()
if (pid == 0) {
// CPU update_params
printf("Rank0: embedding_cpu->update_params()\n");
embedding_cpu->update_params();
}
#endif
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
printf("Rank%d: Round %d end:\n", pid, i);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create new obj for eval()
{
std::ofstream fs(hash_table_file_name);
embedding->download_params_to_host(fs);
fs.close();
}
std::unique_ptr<DataReader<T>> test_data_reader(
new DataReader<T>(test_file_list_name, test_batchsize, label_dim, dense_dim, CHK, params,
gpu_resource_group, num_chunk_threads));
const SparseEmbeddingHashParams<TypeEmbeddingComp> test_embedding_params = {
test_batchsize, 0, slot_sizes, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> test_embedding(
EmbeddingCreator::create_localized_sparse_embedding_one_hot(
test_data_reader->get_row_offsets_tensors(), test_data_reader->get_value_tensors(),
test_embedding_params, plan_file, gpu_resource_group));
{
std::ifstream fs(hash_table_file_name);
test_embedding->upload_params_to_device(fs);
fs.close();
}
// for SparseEmbeddingCpu eval
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim,
dense_dim, CHK, test_batch_num * test_batchsize, combiner, optimizer, lr,
test_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized, global_update,
scaler));
TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results();
// for results check
std::unique_ptr<TypeEmbeddingComp[]> embedding_feature_from_gpu_eval(
new TypeEmbeddingComp[test_batchsize * slot_num * embedding_vec_size]);
{
/////////////////////////////////////////////////////////////////////////////////////////////
// eval
printf("\nRank%d: Round start eval:\n", pid);
// call read a batch
printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", pid);
test_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding_eval->forward()\n", pid);
test_embedding->forward();
// check the result of forward
printf("Rank%d: embedding_eval->get_forward_results()\n", pid);
test_embedding->get_forward_results(
embedding_feature_from_gpu_eval.get()); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu_eval->forward()\n");
test_embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_EQ(true, compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu_eval.get(),
embedding_feature_from_cpu_eval));
}
}
test::mpi_finialize();
}
} // namespace
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_4gpu) {
train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_4gpu) {
train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_4gpu) {
train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_4gpu) {
train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, true);
}
| 786c55c11097bc6fb448724426ae8ed5076f9726.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_profiler_api.h>
#include <sys/time.h>
#include <algorithm>
#include <fstream>
#include <functional>
#include <random>
#include "HugeCTR/include/data_parser.hpp"
#include "HugeCTR/include/data_reader.hpp"
#include "HugeCTR/include/embedding.hpp"
#include "gtest/gtest.h"
#include "nvToolsExt.h"
#include "utest/embedding/embedding_test_utils.hpp"
#include "utest/embedding/sparse_embedding_hash_cpu.hpp"
#include "utest/test_utils.h"
using namespace HugeCTR;
using namespace embedding_test;
namespace {
//---------------------------------------------------------------------------------------
// global params for all testing
const int train_batch_num = 10; // can not more than 32
const int test_batch_num = 1;
const int train_batchsize = 1024;
const int test_batchsize = 1024;
const int slot_num = 26;
const int max_nnz_per_slot = 1;
const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample
const long long vocabulary_size = slot_num * 100;
const int embedding_vec_size = 128;
const int combiner = 0; // 0-sum, 1-mean
const long long label_dim = 1;
const long long dense_dim = 0;
typedef long long T;
const float scaler = 1.0f; // used in mixed precision training
const float lr = 0.01f;
// In order to not allocate the total size of hash table on each GPU, the users need to set the
// size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count,
// eg: 1.25x of that.
const int num_chunk_threads = 1;  // must be 1 for CPU and GPU results comparison
const int num_files = 1;
const Check_t CHK = Check_t::Sum; // Check_t::Sum
const char *train_file_list_name = "train_file_list.txt";
const char *test_file_list_name = "test_file_list.txt";
const char *prefix = "./data_reader_test_data/temp_dataset_";
#ifndef NCCL_A2A
const std::string plan_file(PROJECT_HOME_ + "utest/all2all_plan_dgx_{0,1,2,3,4,5,6,7}.json");
#else
const std::string plan_file = "";
#endif
const char *hash_table_file_name = "localized_hash_table.bin";
// std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as
// max_vocabulary_size_per_gpu
// CAUTION: must match vocabulary_size
// std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,
// 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; //
// for cretio dataset
std::vector<size_t> slot_sizes = {100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100}; // just for verify
//-----------------------------------------------------------------------------------------
template <typename TypeEmbeddingComp>
void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer,
bool global_update) {
OptHyperParams<TypeEmbeddingComp> hyper_params;
const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, global_update,
scaler};
int numprocs = 1, pid = 0;
std::vector<std::vector<int>> vvgpu;
test::mpi_init();
#ifdef ENABLE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
#endif
// if there are multi-node, we assume each node has the same gpu device_list
for (int i = 0; i < numprocs; i++) {
vvgpu.push_back(device_list);
}
std::shared_ptr<DeviceMap> device_map(new DeviceMap(vvgpu, pid));
std::shared_ptr<GPUResourceGroup> gpu_resource_group(new GPUResourceGroup(device_map));
if (pid == 0) {
std::cout << "rank " << pid << " is generating data" << std::endl;
{
// re-generate the dataset files
std::ifstream file(train_file_list_name);
if (file.good()) {
std::remove(train_file_list_name);
}
}
{
// re-generate the dataset files
std::ifstream file(test_file_list_name);
if (file.good()) {
std::remove(test_file_list_name);
}
}
// data generation: key's corresponding slot_id=(key%slot_num)
if (slot_sizes.size() > 0) {
HugeCTR::data_generation_for_localized_test<T, CHK>(
train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes);
HugeCTR::data_generation_for_localized_test<T, CHK>(
test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes);
} else {
CK_THROW_(
Error_t::WrongInput,
"Must set slot_sizes since there is no hashtable in LocalizedSlotSpasrseEmbeddingOneHot");
}
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
std::cout << "This is rank: " << pid << std::endl;
#endif
// setup a data reader
const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num,
max_nnz_per_slot, slot_num};
std::vector<DataReaderSparseParam> params;
params.push_back(param);
std::unique_ptr<DataReader<T>> train_data_reader(
new DataReader<T>(train_file_list_name, train_batchsize, label_dim, dense_dim, CHK, params,
gpu_resource_group, num_chunk_threads));
// generate hashtable
if (pid == 0) {
std::cout << "Init hash table";
    // init hash table file: <key, slot_id, value>
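    // File layout: one record per key, written as
    //   <key: sizeof(T) bytes><slot_id: sizeof(T) bytes><value: embedding_vec_size floats>
    // so with T = long long and embedding_vec_size = 128 each record is 8 + 8 + 512 bytes.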
std::ofstream weight_stream(hash_table_file_name);
if (!weight_stream.is_open()) {
ERROR_MESSAGE_("Error: file not open for writing");
}
// UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id
UnifiedDataSimulator<float> fdata_sim(-0.1f, 0.1f); // for value
for (long long i = 0; i < vocabulary_size; i++) {
T key = (T)i;
// T key = ldata_sim.get_num();
      // CAUTION: cannot set random keys here, because we need to ensure that:
// 1) we can find keys in the data file from this hash table
// 2) there are no repeated keys
weight_stream.write((char *)&key, sizeof(T));
T slot_id;
if (slot_sizes.size() == 0) {
        // slot_id = key % slot_num; // CAUTION: need to designate the slot_id for each key for
// // correctness verification
CK_THROW_(Error_t::WrongInput,
"Must set slot_sizes since there is no hashtable in "
"LocalizedSlotSpasrseEmbeddingOneHot");
} else {
size_t offset = 0;
for (size_t j = 0; j < slot_sizes.size(); j++) {
if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) {
slot_id = (T)j;
break;
}
offset += slot_sizes[j];
}
}
weight_stream.write((char *)&slot_id, sizeof(T));
// float val = (float)i;
// float val = 0.1f;
float val = fdata_sim.get_num();
for (int j = 0; j < embedding_vec_size; j++) {
weight_stream.write((char *)&val, sizeof(float));
}
}
weight_stream.close();
std::cout << " Done" << std::endl;
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
const SparseEmbeddingHashParams<TypeEmbeddingComp> train_embedding_params = {
train_batchsize, 0, slot_sizes, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
EmbeddingCreator::create_localized_sparse_embedding_one_hot(
train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
train_embedding_params, plan_file, gpu_resource_group));
{
// upload hash table to device
std::ifstream fs(hash_table_file_name);
embedding->upload_params_to_device(fs);
fs.close();
}
// for SparseEmbeddingCpu
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num,
label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, optimizer, lr,
train_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized, global_update,
scaler));
TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results();
TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results();
T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr();
float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr();
// for results check
std::unique_ptr<TypeEmbeddingComp[]> embedding_feature_from_gpu(
new TypeEmbeddingComp[train_batchsize * slot_num * embedding_vec_size]);
std::unique_ptr<TypeEmbeddingComp[]> wgrad_from_gpu(
new TypeEmbeddingComp[train_batchsize * slot_num * embedding_vec_size]);
std::unique_ptr<T> hash_table_key_from_gpu(new T[vocabulary_size]);
std::unique_ptr<float> hash_table_value_from_gpu(new float[vocabulary_size * embedding_vec_size]);
typedef struct TypeHashValue_ {
float data[embedding_vec_size];
} TypeHashValue;
for (int i = 0; i < train_batch_num; i++) {
printf("Rank%d: Round %d start training:\n", pid, i);
// call read a batch
printf("Rank%d: data_reader->read_a_batch_to_device()\n", pid);
train_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding->forward()\n", pid);
embedding->forward();
// check the result of forward
printf("Rank%d: embedding->get_forward_results()\n", pid);
embedding->get_forward_results(embedding_feature_from_gpu.get()); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu->forward()\n");
embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_EQ(true, compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu.get(),
embedding_feature_from_cpu));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU backward
printf("Rank%d: embedding->backward()\n", pid);
embedding->backward();
// check the result of backward
printf("Rank%d: embedding->get_backward_results()\n", pid);
embedding->get_backward_results(wgrad_from_gpu.get(), 0);
if (pid == 0) {
// CPU backward
printf("Rank0: embedding_cpu->backward()\n");
embedding_cpu->backward();
printf("Rank0: check backward results: GPU and CPU\n");
ASSERT_EQ(true, compare_wgrad(train_batchsize * slot_num * embedding_vec_size,
wgrad_from_gpu.get(), wgrad_from_cpu));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU update_params
printf("Rank%d: embedding->update_params()\n", pid);
embedding->update_params();
#if 1 // can not check update_params since one-hot has no hashtable // TODO: need to modify
// get_update_params_results()
if (pid == 0) {
// CPU update_params
printf("Rank0: embedding_cpu->update_params()\n");
embedding_cpu->update_params();
}
#endif
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
printf("Rank%d: Round %d end:\n", pid, i);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create new obj for eval()
{
std::ofstream fs(hash_table_file_name);
embedding->download_params_to_host(fs);
fs.close();
}
std::unique_ptr<DataReader<T>> test_data_reader(
new DataReader<T>(test_file_list_name, test_batchsize, label_dim, dense_dim, CHK, params,
gpu_resource_group, num_chunk_threads));
const SparseEmbeddingHashParams<TypeEmbeddingComp> test_embedding_params = {
test_batchsize, 0, slot_sizes, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> test_embedding(
EmbeddingCreator::create_localized_sparse_embedding_one_hot(
test_data_reader->get_row_offsets_tensors(), test_data_reader->get_value_tensors(),
test_embedding_params, plan_file, gpu_resource_group));
{
std::ifstream fs(hash_table_file_name);
test_embedding->upload_params_to_device(fs);
fs.close();
}
// for SparseEmbeddingCpu eval
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim,
dense_dim, CHK, test_batch_num * test_batchsize, combiner, optimizer, lr,
test_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized, global_update,
scaler));
TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results();
// for results check
std::unique_ptr<TypeEmbeddingComp[]> embedding_feature_from_gpu_eval(
new TypeEmbeddingComp[test_batchsize * slot_num * embedding_vec_size]);
{
/////////////////////////////////////////////////////////////////////////////////////////////
// eval
printf("\nRank%d: Round start eval:\n", pid);
// call read a batch
printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", pid);
test_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding_eval->forward()\n", pid);
test_embedding->forward();
// check the result of forward
printf("Rank%d: embedding_eval->get_forward_results()\n", pid);
test_embedding->get_forward_results(
embedding_feature_from_gpu_eval.get()); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu_eval->forward()\n");
test_embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_EQ(true, compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu_eval.get(),
embedding_feature_from_cpu_eval));
}
}
test::mpi_finialize();
}
} // namespace
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_4gpu) {
train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp32_sgd_global_update_4gpu) {
train_and_test<float>({0, 1, 2, 3}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_4gpu) {
train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, false);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, true);
}
TEST(localized_sparse_embedding_one_hot_test, fp16_sgd_global_update_4gpu) {
train_and_test<__half>({0, 1, 2, 3}, Optimizer_t::SGD, true);
}
|
e182ef7d0832263717289703ec5ae2dcd8ccb40d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TileSize 32 //Tilesize for shared memory
#define stride 8 //Stride for thread coarsening
//Optimised GEMM Kernel using Tiling and Thread Coarsening
__global__ void gemm(float *A, float *B,float *C,int m,int n,int k)
{
    //Calculating index of tile where data is to be loaded
int r=threadIdx.x;
int c=threadIdx.y;
//Calculating index of that element in input matrix
int globalRow=TileSize*blockIdx.x + r;
int globalCol=TileSize*blockIdx.y + c;
//Assuming new tile size because of coarsening
int newTileSize=(TileSize/stride);
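    //Thread coarsening: blocks are launched with (TileSize, TileSize/stride) threads,
    //so each thread loads and accumulates 'stride' columns of the 32x32 tile, spaced
    //newTileSize columns apart. This reduces the threads per tile by a factor of
    //'stride' while keeping the full shared-memory tile.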
//Shared memory declaration
__shared__ float S1[TileSize][TileSize];
__shared__ float S2[TileSize][TileSize];
    //Per-thread accumulators for the 'stride' output elements, initialised to 0
float val[stride];
for(int i=0;i<stride;i++)
val[i]=0;
//Number of tiles that will be iterated
int numTiles=ceil((float)((float)k)/((float)TileSize));
//Loading each tile using strides
for(int i=0;i<numTiles;i++)
{
for(int j=0;j<stride;j++)
{
//Calculating the index from where the value needs to be loaded
int row=TileSize*i + r;
int col=TileSize*i + c;
//Storing the values in the tiles
if((col + j*newTileSize)<k && globalRow<m)
S1[c + j*newTileSize][r]=A[(col + j*newTileSize)*m + globalRow];
else S1[c + j*newTileSize][r]=0;
if(row<k && (globalCol + j*newTileSize)<n )
S2[c + j*newTileSize][r]=B[(globalCol + j*newTileSize)*k + row];
else S2[c + j*newTileSize][r]=0;
}
__syncthreads();
        //Multiply a row of the A tile (S1) with 'stride' columns of the B tile (S2)
for(int l=0;l<TileSize;l++)
{
for(int j=0;j<stride;j++)
{
val[j]+=S1[l][r]*S2[c + j*newTileSize][l];
}
}
__syncthreads();
}
//Storing final value in the output matrix
for(int j=0;j<stride;j++)
{
if((globalCol + j*newTileSize)<n && globalRow<m)
C[(globalCol + j*newTileSize)*m + globalRow]=val[j];
}
}
int main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Input the length and width of the matrices
int length1,length2,width1,width2;
printf("Enter values of length1,width1,length2,width2:");
scanf("%d %d %d %d",&length1,&width1,&length2,&width2);
//Calculate size of matrices in bytes
int numElements = length1*width1;
size_t size = numElements * sizeof(float);
int numElements2 = length2*width2;
size_t size2 = numElements2 * sizeof(float);
int numElements3 = length1*width2;
size_t size3 = numElements3 * sizeof(float);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size2);
// Allocate the host output vector C
float *h_C = (float *)malloc(size3);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C==NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors for testing purpose
for (int i = 0; i < numElements; ++i)
{
h_A[i]=i%8;
}
for (int i = 0; i < numElements2; ++i)
{
h_B[i]=i%8;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size3);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in device memory
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size2, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the GEMM CUDA Kernel
//Define block and grid dimensions
dim3 block(32,4,1);
dim3 grid(max(length1,length2)/32 + 1,max(width1,width2)/32 + 1,1);
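    //Block is (TileSize, TileSize/stride) = (32,4): threadIdx.y is shrunk by the
    //coarsening factor because every thread produces 'stride' output columns.
    //The grid conservatively covers max(...) of the dimensions; the bounds checks
    //inside the kernel discard out-of-range rows and columns.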
//Use cuda events to determine time taken
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
//Launch the gemm kernel
hipLaunchKernelGGL(( gemm), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C,length1,width2,width1);
//Calculate the time taken by the Kernel
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("The elapsed time in gpu was %f ms\n", milliseconds);
//Check for any error in launch of kernel
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch gemm kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector in host memory.
err = hipMemcpy(h_C, d_C, size3 , hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct by performing the operation in CPU
float arr[length1][width2]; //Result array made in CPU side to verify the results
for(int i=0;i<length1;i++){
for(int j=0;j<width2;j++){
arr[i][j]=0;
}
}
//Monitor Time taken in serial execution in CPU side for comparison
clock_t cpu_start, cpu_end;
double cpu_time_used;
cpu_start = clock();
int f=0;
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
for(int k=0;k<length2;k++)
{
arr[i][j]+=h_A[k*length1 + i]*h_B[j*length2 + k];
}
if(arr[i][j]!=h_C[j*length1 + i]){
f=1;
}
}
}
/*
Code to print both side results if necessary
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
printf("%f ",arr[i][j]);
}
printf("\n");
}
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
printf("%f ",h_C[j*length1 + i]);
}
printf("\n");
}
*/
//Serial time execution printing
cpu_end = clock();
cpu_time_used = ((double) (cpu_end - cpu_start)) / CLOCKS_PER_SEC;
printf("\nTime elapsed in serial execution:%f ms\n",cpu_time_used*1000.00);
//If both CPU side and GPU side results match or not
if(!f)
printf("Success!!\n");
else
printf("Failure!!\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Reset the device and exit
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\n");
// Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
| e182ef7d0832263717289703ec5ae2dcd8ccb40d.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#define TileSize 32 //Tilesize for shared memory
#define stride 8 //Stride for thread coarsening
//Optimised GEMM Kernel using Tiling and Thread Coarsening
__global__ void gemm(float *A, float *B,float *C,int m,int n,int k)
{
    //Calculating index of tile where data is to be loaded
int r=threadIdx.x;
int c=threadIdx.y;
//Calculating index of that element in input matrix
int globalRow=TileSize*blockIdx.x + r;
int globalCol=TileSize*blockIdx.y + c;
//Assuming new tile size because of coarsening
int newTileSize=(TileSize/stride);
//Shared memory declaration
__shared__ float S1[TileSize][TileSize];
__shared__ float S2[TileSize][TileSize];
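    //S1 caches a TileSize x TileSize block of A (indexed [tile column][tile row]) and
    //S2 the matching block of B; each thread fills 'stride' entries per tile in the
    //j-loop below before the block synchronizes and accumulates the partial products.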
    //Per-thread accumulators for the 'stride' output elements, initialised to 0
float val[stride];
for(int i=0;i<stride;i++)
val[i]=0;
//Number of tiles that will be iterated
int numTiles=ceil((float)((float)k)/((float)TileSize));
//Loading each tile using strides
for(int i=0;i<numTiles;i++)
{
for(int j=0;j<stride;j++)
{
//Calculating the index from where the value needs to be loaded
int row=TileSize*i + r;
int col=TileSize*i + c;
//Storing the values in the tiles
if((col + j*newTileSize)<k && globalRow<m)
S1[c + j*newTileSize][r]=A[(col + j*newTileSize)*m + globalRow];
else S1[c + j*newTileSize][r]=0;
if(row<k && (globalCol + j*newTileSize)<n )
S2[c + j*newTileSize][r]=B[(globalCol + j*newTileSize)*k + row];
else S2[c + j*newTileSize][r]=0;
}
__syncthreads();
        //Multiply a row of the A tile (S1) with 'stride' columns of the B tile (S2)
for(int l=0;l<TileSize;l++)
{
for(int j=0;j<stride;j++)
{
val[j]+=S1[l][r]*S2[c + j*newTileSize][l];
}
}
__syncthreads();
}
//Storing final value in the output matrix
for(int j=0;j<stride;j++)
{
if((globalCol + j*newTileSize)<n && globalRow<m)
C[(globalCol + j*newTileSize)*m + globalRow]=val[j];
}
}
int main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Input the length and width of the matrices
int length1,length2,width1,width2;
printf("Enter values of length1,width1,length2,width2:");
scanf("%d %d %d %d",&length1,&width1,&length2,&width2);
//Calculate size of matrices in bytes
int numElements = length1*width1;
size_t size = numElements * sizeof(float);
int numElements2 = length2*width2;
size_t size2 = numElements2 * sizeof(float);
int numElements3 = length1*width2;
size_t size3 = numElements3 * sizeof(float);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size2);
// Allocate the host output vector C
float *h_C = (float *)malloc(size3);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C==NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors for testing purpose
for (int i = 0; i < numElements; ++i)
{
h_A[i]=i%8;
}
for (int i = 0; i < numElements2; ++i)
{
h_B[i]=i%8;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size3);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in device memory
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size2, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the GEMM CUDA Kernel
//Define block and grid dimensions
dim3 block(32,4,1);
dim3 grid(max(length1,length2)/32 + 1,max(width1,width2)/32 + 1,1);
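    //Kernel arguments map to m=length1, n=width2, k=width1, so the product is only
    //meaningful when width1 == length2. All matrices are treated as column-major
    //(element (i,j) at index j*rows + i), matching the CPU verification below.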
//Use cuda events to determine time taken
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//Launch the gemm kernel
gemm<<<grid, block>>>(d_A, d_B, d_C,length1,width2,width1);
//Calculate the time taken by the Kernel
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("The elapsed time in gpu was %f ms\n", milliseconds);
//Check for any error in launch of kernel
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch gemm kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector in host memory.
err = cudaMemcpy(h_C, d_C, size3 , cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct by performing the operation in CPU
float arr[length1][width2]; //Result array made in CPU side to verify the results
for(int i=0;i<length1;i++){
for(int j=0;j<width2;j++){
arr[i][j]=0;
}
}
//Monitor Time taken in serial execution in CPU side for comparison
clock_t cpu_start, cpu_end;
double cpu_time_used;
cpu_start = clock();
int f=0;
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
for(int k=0;k<length2;k++)
{
arr[i][j]+=h_A[k*length1 + i]*h_B[j*length2 + k];
}
if(arr[i][j]!=h_C[j*length1 + i]){
f=1;
}
}
}
/*
Code to print both side results if necessary
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
printf("%f ",arr[i][j]);
}
printf("\n");
}
for(int i=0;i<length1;i++)
{
for(int j=0;j<width2;j++)
{
printf("%f ",h_C[j*length1 + i]);
}
printf("\n");
}
*/
//Serial time execution printing
cpu_end = clock();
cpu_time_used = ((double) (cpu_end - cpu_start)) / CLOCKS_PER_SEC;
printf("\nTime elapsed in serial execution:%f ms\n",cpu_time_used*1000.00);
//If both CPU side and GPU side results match or not
if(!f)
printf("Success!!\n");
else
printf("Failure!!\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Reset the device and exit
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\n");
// Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
8c8446fdc05f4a0c587e7028222a56f00c310fe3.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/transpose_kernel.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h"
#endif
namespace phi {
using GPUDNNDataLayout = paddle::platform::DataLayout;
template <typename T, typename Context>
void ConvTransposeGradRawGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
const T* filter_data = filter.data<T>();
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ =
dilations; // cudnn v5 does not support dilations
const GPUDNNDataLayout data_layout =
(data_format != "NHWC" ? GPUDNNDataLayout::kNCHW
: GPUDNNDataLayout::kNHWC);
// if channel_last, transpose to channel_first
DenseTensor x_transpose;
DenseTensor dout_transpose;
std::vector<int> x_vec = vectorize<int>(x.dims());
std::vector<int> out_vec = vectorize<int>(dout.dims());
if (data_layout == GPUDNNDataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
x_vec[i] = x.dims()[axis[i]];
out_vec[i] = dout.dims()[axis[i]];
}
x_transpose = Transpose<T, Context>(ctx, x, axis);
dout_transpose = Transpose<T, Context>(ctx, dout, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
x_vec[i] = x.dims()[axis[i]];
out_vec[i] = dout.dims()[axis[i]];
}
x_transpose = Transpose<T, Context>(ctx, x, axis);
dout_transpose = Transpose<T, Context>(ctx, dout, axis);
}
} else {
x_transpose = x;
dout_transpose = dout;
}
// update padding and dilation
auto x_dims = x_transpose.dims();
auto filter_dims = filter.dims();
DDim x_data_dims;
x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);
std::vector<int> x_pad(x_dims.size() * 2, 0);
DenseTensor transformed_dout;
std::vector<int> padding_common(data_dim, 0);
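  // cuDNN/MIOpen take a single padding value per spatial dimension, so for asymmetric
  // padding we pass the smaller side (padding_common) to the descriptor and zero-pad
  // dout by the left/right difference to reproduce the requested asymmetric padding.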
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_dout_shape_vec(data_dim + 2);
new_dout_shape_vec[0] = dout_transpose.dims()[0];
new_dout_shape_vec[1] = dout_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
      padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
new_dout_shape_vec[i + 2] =
dout_transpose.dims()[i + 2] + padding_diff[i];
x_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
}
transformed_dout.Resize(make_ddim(new_dout_shape_vec));
ctx.template Alloc<T>(&transformed_dout);
const int rank = x_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
} break;
default:
PADDLE_THROW(errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor."));
}
} else {
transformed_dout = dout_transpose;
if (paddings_.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[2 * i];
}
}
}
const T* x_data = x_transpose.data<T>();
const T* dout_data = transformed_dout.data<T>();
out_vec = vectorize<int>(transformed_dout.dims());
// ------------------- cudnn descriptors ---------------------
GPUDNNDataLayout layout;
if (strides.size() == 2U) {
layout = GPUDNNDataLayout::kNCHW;
} else {
layout = GPUDNNDataLayout::kNCDHW;
}
int iwo_groups = groups;
int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
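  // With MIOpen or cuDNN >= 7.0.1 grouped convolution is expressed through the
  // convolution descriptor (c_groups); the tensor/filter descriptors use iwo_groups = 1
  // and the per-group loops below therefore execute only once.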
auto dtype = paddle::platform::CudnnDataType<T>::type;
paddle::operators::ConvArgs args1{&transformed_dout,
&filter,
&x_transpose,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args2{&transformed_dout,
&filter,
&x_transpose,
strides,
padding_common,
dilations_,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout);
size_t workspace_size = 0;
auto handle = ctx.cudnn_handle();
bool deterministic = FLAGS_cudnn_deterministic;
T* dx_data = nullptr;
T* dfilter_data = nullptr;
if (dx) {
dx_data = ctx.template Alloc<T>(dx);
args1.handle = handle;
args1.idesc.set(transformed_dout, iwo_groups);
args1.wdesc.set(filter, layout_tensor, iwo_groups);
args1.odesc.set(x_transpose, iwo_groups);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_groups);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
    workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1));
fwd_result.algo =
search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result = search1::Find<T>(args1, false, deterministic, ctx);
    workspace_size = std::max(
workspace_size, search1::GetWorkspaceSize(args1, fwd_result.algo));
#endif
}
if (dfilter) {
dfilter_data = ctx.template Alloc<T>(dfilter);
args2.handle = handle;
args2.idesc.set(transformed_dout, iwo_groups);
args2.wdesc.set(*dfilter, layout_tensor, iwo_groups);
args2.odesc.set(x_transpose, iwo_groups);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_groups);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
    workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2));
filter_result.algo =
search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result = search2::Find<T>(args2, false, deterministic, ctx);
    workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, filter_result.algo));
#endif
}
// ------------------- cudnn conv backward data ---------------------
  // FIXME(typhoonzero): template type T may not be the same as cudnn call.
int x_offset = x.numel() / x.dims()[0] / groups;
int dout_offset =
transformed_dout.numel() / transformed_dout.dims()[0] / groups;
int filter_offset = filter.numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
auto workspace_handle = ctx.cudnn_workspace_handle();
if (dx) {
// Because beta is zero, it is unnecessary to reset dx.
for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::miopenConvolutionForward(handle,
&alpha,
args1.idesc.desc(),
dout_data + dout_offset * g,
args1.wdesc.desc(),
filter_data + filter_offset * g,
args1.cdesc.desc(),
fwd_result.algo,
&beta,
args1.odesc.desc(),
dx_data + x_offset * g,
cudnn_workspace,
workspace_size));
};
#else // PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cudnnConvolutionForward(handle,
&alpha,
args1.idesc.desc(),
dout_data + dout_offset * g,
args1.wdesc.desc(),
filter_data + filter_offset * g,
args1.cdesc.desc(),
fwd_result.algo,
cudnn_workspace,
workspace_size,
&beta,
args1.odesc.desc(),
dx_data + x_offset * g));
};
#endif // PADDLE_WITH_HIP
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (data_layout == GPUDNNDataLayout::kNHWC) {
DenseTensor dx_transpose;
DenseTensor dx_nchw;
dx_nchw.ShareDataWith(*dx);
dx_nchw.Resize(make_ddim(x_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
*dx = dx_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
*dx = dx_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (dfilter) {
// Because beta is zero, it is unnecessary to reset dfilter.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args2.odesc.desc(),
x_data + x_offset * g,
args2.idesc.desc(),
dout_data + dout_offset * g,
args2.cdesc.desc(),
filter_result.algo,
&beta,
args2.wdesc.desc(),
dfilter_data + filter_offset * g,
cudnn_workspace,
workspace_size));
};
#else // PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args2.idesc.desc(),
dout_data + dout_offset * g,
args2.odesc.desc(),
x_data + x_offset * g,
args2.cdesc.desc(),
filter_result.algo,
cudnn_workspace,
workspace_size,
&beta,
args2.wdesc.desc(),
dfilter_data + filter_offset * g));
};
#endif // PADDLE_WITH_HIP
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
}
}
template <typename T, typename Context>
void Conv2dTransposeGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings_,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
x,
filter,
dout,
strides,
paddings_,
padding_algorithm,
groups,
dilations_,
data_format,
dx,
dfilter);
}
/*
* Inputs: I, filter, dout, ddI, ddfilter
* Outputs: ddout, dfilter, dI
* ddo = conv_bp_data(filter, ddI) + conv_bp_data(ddfilter, I)
* dfilter = conv_bp_filter(dout, ddI)
* dI = conv(dout, ddfilter)
*/
template <typename T, typename Context>
void Conv2dTransposeDoubleGradGPUDNNKernel(
const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const DenseTensor& ddx,
const DenseTensor& ddfilter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter,
DenseTensor* ddout) {
if (dx) {
ctx.template Alloc<T>(dx);
}
if (dfilter) {
ctx.template Alloc<T>(dfilter);
}
if (ddout) {
ctx.template Alloc<T>(ddout);
funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddout, static_cast<T>(0));
}
const T* filter_ = filter.data<T>();
const T* dout_ = dout.data<T>();
const T* ddx_ = nullptr;
const T* ddfilter_ = nullptr;
T* dx_ = nullptr;
T* dfilter_ = nullptr;
T* ddout_ = nullptr;
T* transformed_dx_ = nullptr;
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ = dilations;
bool deterministic = FLAGS_cudnn_deterministic;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform DenseTensors to channel first-----------
DenseTensor transformed_x_channel(x.type());
DenseTensor transformed_dout_channel(dout.type());
DenseTensor transformed_ddx_channel(x.type());
DenseTensor transformed_dx_channel(x.type());
DenseTensor transformed_ddout_channel(dout.type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);
ResizeToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);
TransToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);
ResizeToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);
TransToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);
if (dx) {
ResizeToChannelFirst<Context, T>(ctx, dx, &transformed_dx_channel);
ctx.template Alloc<T>(&transformed_dx_channel);
}
if (ddout) {
ResizeToChannelFirst<Context, T>(ctx, ddout, &transformed_ddout_channel);
}
} else {
transformed_x_channel = x;
transformed_dout_channel = dout;
transformed_ddx_channel = ddx;
if (dx) {
transformed_dx_channel = *dx;
}
}
std::vector<int> out_vec = vectorize<int>(transformed_dout_channel.dims());
auto x_dims = transformed_x_channel.dims();
auto filter_dims = filter.dims();
DDim x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);
DenseTensor transformed_x(x.type());
DenseTensor transformed_ddx(x.type());
DenseTensor transformed_dout(dout.type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(x.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_x_channel.dims()[0];
new_input_shape_vec[1] = transformed_x_channel.dims()[1];
new_output_grad_shape_vec[0] = transformed_dout_channel.dims()[0];
new_output_grad_shape_vec[1] = transformed_dout_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
      padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_x_channel.dims()[i + 2] + padding_diff[i];
new_output_grad_shape_vec[i + 2] =
transformed_dout_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
}
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_x.Resize(new_input_shape);
transformed_ddx.Resize(new_input_shape);
transformed_dout.Resize(make_ddim(new_output_grad_shape_vec));
ctx.template Alloc<T>(&transformed_x);
ctx.template Alloc<T>(&transformed_ddx);
ctx.template Alloc<T>(&transformed_dout);
// pad for input
const int rank = x.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_dout_channel,
pad_value,
&transformed_dout);
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddx_channel,
pad_value,
&transformed_ddx);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddx_channel,
pad_value,
&transformed_ddx);
} break;
default:
PADDLE_THROW(errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_x = transformed_x_channel;
transformed_dout = transformed_dout_channel;
transformed_ddx = transformed_ddx_channel;
if (paddings_.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[2 * i];
}
}
}
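  // In the asymmetric-padding path ddout is first computed at an enlarged
  // (symmetrically padded) size; starts/ends/axes below describe the region that is
  // later sliced out to recover the requested output extent.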
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + out_vec[i + 2];
axes[i] = i + 2;
}
std::vector<int> transformed_out_vec = out_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_out_vec[i + 2] =
out_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1];
}
if (!is_sys_pad) {
transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
ctx.template Alloc<T>(&transformed_ddout_channel);
} else {
ctx.template Alloc<T>(ddout);
transformed_ddout_channel = *ddout;
transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
}
const T* x_ = transformed_x.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
paddle::operators::ConvArgs args1{&transformed_ddout_channel,
&filter,
&transformed_ddx,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args2{&transformed_ddout_channel,
&ddfilter,
&transformed_x,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args3{&transformed_dout,
dfilter,
&transformed_ddx_channel,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args4{&transformed_dout,
&ddfilter,
&transformed_dx_channel,
strides,
padding_common,
dilations_,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> bwd_result1;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> bwd_result2;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result;
#else
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> bwd_result1;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> bwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result;
#endif
auto layout = paddle::platform::GetCudnnTensorFormat(GPUDNNDataLayout::kNCHW);
// ddo = conv(ddI, filter) + conv(I, ddfilter)
size_t workspace_size = 0;
T* transformed_ddout_channel_ = nullptr;
if (ddout) {
ddout_ = ddout->data<T>();
transformed_ddout_channel_ = transformed_ddout_channel.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_ddout_channel, iwo_group);
args1.wdesc.set(filter, layout, iwo_group);
args1.odesc.set(transformed_ddx, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
bwd_result1.algo =
search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
bwd_result1 = search1::Find<T>(args1, false, deterministic, ctx);
workspace_size = search1::GetWorkspaceSize(args1, bwd_result1.algo);
#endif
ddfilter_ = ddfilter.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_ddout_channel, iwo_group);
args2.wdesc.set(ddfilter, layout, iwo_group);
args2.odesc.set(transformed_x, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
    workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2));
bwd_result2.algo =
search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
bwd_result2 = search2::Find<T>(args2, false, deterministic, ctx);
    workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, bwd_result2.algo));
#endif
}
if (dfilter) {
dfilter_ = dfilter->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_dout, iwo_group);
args3.wdesc.set(*dfilter, layout, iwo_group);
args3.odesc.set(transformed_ddx_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
    workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo =
search3::Find<T>(args3, false, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result = search3::Find<T>(args3, false, deterministic, ctx);
    workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (dx) {
transformed_dx_ = transformed_dx_channel.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dout, iwo_group);
args4.wdesc.set(ddfilter, layout, iwo_group);
args4.odesc.set(transformed_dx_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
    workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
fwd_result.algo =
search4::Find<T>(args4, false, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result = search4::Find<T>(args4, false, deterministic, ctx);
    workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, fwd_result.algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
paddle::operators::GetNCDHW(transformed_x.dims(),
GPUDNNDataLayout::kNCHW,
&i_n,
&i_c,
&i_d,
&i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
paddle::operators::GetNCDHW(transformed_dout.dims(),
GPUDNNDataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in =
transformed_x.numel() / transformed_x.dims()[0] / groups;
int group_offset_out =
transformed_dout.numel() / transformed_dout.dims()[0] / groups;
int group_offset_filter = filter.numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddout) {
ddx_ = transformed_ddx.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args1.odesc.desc(),
ddx_ + i * group_offset_in,
args1.wdesc.desc(),
filter_ + i * group_offset_filter,
args1.cdesc.desc(),
bwd_result1.algo,
&beta,
args1.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args1.wdesc.desc(),
filter_ + i * group_offset_filter,
args1.odesc.desc(),
ddx_ + i * group_offset_in,
args1.cdesc.desc(),
bwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      // MIOPEN only supports beta == 0.0f, so conv(x, ddfilter) is written to a
      // temporary tensor and then accumulated into ddout with miopenOpTensor below.
DenseTensor conv_x_ddfilter(dout.type());
conv_x_ddfilter.Resize(transformed_ddout_channel.dims());
T* conv_x_ddfilter_data = ctx.template Alloc<T>(&conv_x_ddfilter);
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args2.odesc.desc(),
x_ + i * group_offset_in,
args2.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args2.cdesc.desc(),
bwd_result2.algo,
&beta,
args2.idesc.desc(),
conv_x_ddfilter_data + i * group_offset_out,
workspace_ptr,
workspace_size));
},
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenOpTensor(
handle,
miopenTensorOpAdd,
&alpha,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out,
&alpha,
args2.idesc.desc(),
conv_x_ddfilter_data + i * group_offset_out,
&beta,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args2.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args2.odesc.desc(),
x_ + i * group_offset_in,
args2.cdesc.desc(),
bwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
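    // If asymmetric padding was applied (is_sys_pad == false), slice the padded
    // ddout back to the requested output shape; for channel-last layouts also
    // transpose the result back to NHWC/NDHWC.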
if ((!is_sys_pad) && (!channel_last)) {
if (strides.size() == 2U) {
funcs::Slice<Context, T, 4>(
ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
funcs::Slice<Context, T, 5>(
ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
}
} else if ((!is_sys_pad) && (channel_last)) {
if (strides.size() == 2U) {
funcs::Slice<Context, T, 4>(ctx,
&transformed_ddout_channel,
&transformed_ddout_channel,
starts,
ends,
axes);
} else if (!is_sys_pad && strides.size() == 3U) {
funcs::Slice<Context, T, 5>(ctx,
&transformed_ddout_channel,
&transformed_ddout_channel,
starts,
ends,
axes);
}
TransToChannelLast<Context, T>(ctx, &transformed_ddout_channel, ddout);
}
}
T* transformed_dout_channel_ = transformed_dout.data<T>();
if (dfilter) {
ddx_ = transformed_ddx_channel.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
ddx_ + i * group_offset_in,
args3.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dfilter_ + i * group_offset_filter,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args3.odesc.desc(),
ddx_ + i * group_offset_in,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dfilter_ + i * group_offset_filter));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
}
if (dx) {
ddfilter_ = ddfilter.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionForward(
handle,
&alpha,
args4.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args4.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args4.cdesc.desc(),
fwd_result.algo,
&beta,
args4.odesc.desc(),
transformed_dx_ + i * group_offset_in,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionForward(
handle,
&alpha,
args4.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args4.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args4.cdesc.desc(),
fwd_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.odesc.desc(),
transformed_dx_ + i * group_offset_in));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dx_channel, dx);
}
}
}
template <typename T, typename Context>
void Conv3dTransposeGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings_,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
x,
filter,
dout,
strides,
paddings_,
padding_algorithm,
groups,
dilations_,
data_format,
dx,
dfilter);
}
} // namespace phi
using float16 = phi::dtype::float16;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
PD_REGISTER_KERNEL(conv2d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeDoubleGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3dTransposeGradGPUDNNKernel,
float,
float16) {}
#else
PD_REGISTER_KERNEL(conv2d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeDoubleGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3dTransposeGradGPUDNNKernel,
float,
double,
float16) {}
#endif
| 8c8446fdc05f4a0c587e7028222a56f00c310fe3.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/transpose_kernel.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h"
#endif
namespace phi {
using GPUDNNDataLayout = paddle::platform::DataLayout;
template <typename T, typename Context>
void ConvTransposeGradRawGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
const T* filter_data = filter.data<T>();
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ =
dilations; // cudnn v5 does not support dilations
const GPUDNNDataLayout data_layout =
(data_format != "NHWC" ? GPUDNNDataLayout::kNCHW
: GPUDNNDataLayout::kNHWC);
// if channel_last, transpose to channel_first
DenseTensor x_transpose;
DenseTensor dout_transpose;
std::vector<int> x_vec = vectorize<int>(x.dims());
std::vector<int> out_vec = vectorize<int>(dout.dims());
if (data_layout == GPUDNNDataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
x_vec[i] = x.dims()[axis[i]];
out_vec[i] = dout.dims()[axis[i]];
}
x_transpose = Transpose<T, Context>(ctx, x, axis);
dout_transpose = Transpose<T, Context>(ctx, dout, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
x_vec[i] = x.dims()[axis[i]];
out_vec[i] = dout.dims()[axis[i]];
}
x_transpose = Transpose<T, Context>(ctx, x, axis);
dout_transpose = Transpose<T, Context>(ctx, dout, axis);
}
} else {
x_transpose = x;
dout_transpose = dout;
}
// update padding and dilation
auto x_dims = x_transpose.dims();
auto filter_dims = filter.dims();
DDim x_data_dims;
x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);
std::vector<int> x_pad(x_dims.size() * 2, 0);
DenseTensor transformed_dout;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_dout_shape_vec(data_dim + 2);
new_dout_shape_vec[0] = dout_transpose.dims()[0];
new_dout_shape_vec[1] = dout_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
new_dout_shape_vec[i + 2] =
dout_transpose.dims()[i + 2] + padding_diff[i];
x_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
}
transformed_dout.Resize(make_ddim(new_dout_shape_vec));
ctx.template Alloc<T>(&transformed_dout);
const int rank = x_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
} break;
default:
PADDLE_THROW(errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor."));
}
} else {
transformed_dout = dout_transpose;
if (paddings_.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[2 * i];
}
}
}
const T* x_data = x_transpose.data<T>();
const T* dout_data = transformed_dout.data<T>();
out_vec = vectorize<int>(transformed_dout.dims());
// ------------------- cudnn descriptors ---------------------
GPUDNNDataLayout layout;
if (strides.size() == 2U) {
layout = GPUDNNDataLayout::kNCHW;
} else {
layout = GPUDNNDataLayout::kNCDHW;
}
int iwo_groups = groups;
int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
paddle::operators::ConvArgs args1{&transformed_dout,
&filter,
&x_transpose,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args2{&transformed_dout,
&filter,
&x_transpose,
strides,
padding_common,
dilations_,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout);
size_t workspace_size = 0;
auto handle = ctx.cudnn_handle();
bool deterministic = FLAGS_cudnn_deterministic;
T* dx_data = nullptr;
T* dfilter_data = nullptr;
if (dx) {
dx_data = ctx.template Alloc<T>(dx);
args1.handle = handle;
args1.idesc.set(transformed_dout, iwo_groups);
args1.wdesc.set(filter, layout_tensor, iwo_groups);
args1.odesc.set(x_transpose, iwo_groups);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_groups);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1));
fwd_result.algo =
search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result = search1::Find<T>(args1, false, deterministic, ctx);
workspace_size = std::max(
workspace_size, search1::GetWorkspaceSize(args1, fwd_result.algo));
#endif
}
if (dfilter) {
dfilter_data = ctx.template Alloc<T>(dfilter);
args2.handle = handle;
args2.idesc.set(transformed_dout, iwo_groups);
args2.wdesc.set(*dfilter, layout_tensor, iwo_groups);
args2.odesc.set(x_transpose, iwo_groups);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_groups);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2));
filter_result.algo =
search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result = search2::Find<T>(args2, false, deterministic, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, filter_result.algo));
#endif
}
// ------------------- cudnn conv backward data ---------------------
  // FIXME(typhoonzero): template type T may not be the same as the cudnn call's type.
int x_offset = x.numel() / x.dims()[0] / groups;
int dout_offset =
transformed_dout.numel() / transformed_dout.dims()[0] / groups;
int filter_offset = filter.numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
auto workspace_handle = ctx.cudnn_workspace_handle();
if (dx) {
// Because beta is zero, it is unnecessary to reset dx.
for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::miopenConvolutionForward(handle,
&alpha,
args1.idesc.desc(),
dout_data + dout_offset * g,
args1.wdesc.desc(),
filter_data + filter_offset * g,
args1.cdesc.desc(),
fwd_result.algo,
&beta,
args1.odesc.desc(),
dx_data + x_offset * g,
cudnn_workspace,
workspace_size));
};
#else // PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cudnnConvolutionForward(handle,
&alpha,
args1.idesc.desc(),
dout_data + dout_offset * g,
args1.wdesc.desc(),
filter_data + filter_offset * g,
args1.cdesc.desc(),
fwd_result.algo,
cudnn_workspace,
workspace_size,
&beta,
args1.odesc.desc(),
dx_data + x_offset * g));
};
#endif // PADDLE_WITH_HIP
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (data_layout == GPUDNNDataLayout::kNHWC) {
DenseTensor dx_transpose;
DenseTensor dx_nchw;
dx_nchw.ShareDataWith(*dx);
dx_nchw.Resize(make_ddim(x_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
*dx = dx_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
*dx = dx_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (dfilter) {
// Because beta is zero, it is unnecessary to reset dfilter.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args2.odesc.desc(),
x_data + x_offset * g,
args2.idesc.desc(),
dout_data + dout_offset * g,
args2.cdesc.desc(),
filter_result.algo,
&beta,
args2.wdesc.desc(),
dfilter_data + filter_offset * g,
cudnn_workspace,
workspace_size));
};
#else // PADDLE_WITH_HIP
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args2.idesc.desc(),
dout_data + dout_offset * g,
args2.odesc.desc(),
x_data + x_offset * g,
args2.cdesc.desc(),
filter_result.algo,
cudnn_workspace,
workspace_size,
&beta,
args2.wdesc.desc(),
dfilter_data + filter_offset * g));
};
#endif // PADDLE_WITH_HIP
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
}
}
template <typename T, typename Context>
void Conv2dTransposeGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings_,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
x,
filter,
dout,
strides,
paddings_,
padding_algorithm,
groups,
dilations_,
data_format,
dx,
dfilter);
}
/*
* Inputs: I, filter, dout, ddI, ddfilter
* Outputs: ddout, dfilter, dI
* ddo = conv_bp_data(filter, ddI) + conv_bp_data(ddfilter, I)
* dfilter = conv_bp_filter(dout, ddI)
* dI = conv(dout, ddfilter)
*/
template <typename T, typename Context>
void Conv2dTransposeDoubleGradGPUDNNKernel(
const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const DenseTensor& ddx,
const DenseTensor& ddfilter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter,
DenseTensor* ddout) {
if (dx) {
ctx.template Alloc<T>(dx);
}
if (dfilter) {
ctx.template Alloc<T>(dfilter);
}
if (ddout) {
ctx.template Alloc<T>(ddout);
funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddout, static_cast<T>(0));
}
const T* filter_ = filter.data<T>();
const T* dout_ = dout.data<T>();
const T* ddx_ = nullptr;
const T* ddfilter_ = nullptr;
T* dx_ = nullptr;
T* dfilter_ = nullptr;
T* ddout_ = nullptr;
T* transformed_dx_ = nullptr;
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ = dilations;
bool deterministic = FLAGS_cudnn_deterministic;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform DenseTensors to channel first-----------
DenseTensor transformed_x_channel(x.type());
DenseTensor transformed_dout_channel(dout.type());
DenseTensor transformed_ddx_channel(x.type());
DenseTensor transformed_dx_channel(x.type());
DenseTensor transformed_ddout_channel(dout.type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);
ResizeToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);
TransToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);
ResizeToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);
TransToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);
if (dx) {
ResizeToChannelFirst<Context, T>(ctx, dx, &transformed_dx_channel);
ctx.template Alloc<T>(&transformed_dx_channel);
}
if (ddout) {
ResizeToChannelFirst<Context, T>(ctx, ddout, &transformed_ddout_channel);
}
} else {
transformed_x_channel = x;
transformed_dout_channel = dout;
transformed_ddx_channel = ddx;
if (dx) {
transformed_dx_channel = *dx;
}
}
std::vector<int> out_vec = vectorize<int>(transformed_dout_channel.dims());
auto x_dims = transformed_x_channel.dims();
auto filter_dims = filter.dims();
DDim x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);
DenseTensor transformed_x(x.type());
DenseTensor transformed_ddx(x.type());
DenseTensor transformed_dout(dout.type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(x.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_x_channel.dims()[0];
new_input_shape_vec[1] = transformed_x_channel.dims()[1];
new_output_grad_shape_vec[0] = transformed_dout_channel.dims()[0];
new_output_grad_shape_vec[1] = transformed_dout_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_x_channel.dims()[i + 2] + padding_diff[i];
new_output_grad_shape_vec[i + 2] =
transformed_dout_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
}
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_x.Resize(new_input_shape);
transformed_ddx.Resize(new_input_shape);
transformed_dout.Resize(make_ddim(new_output_grad_shape_vec));
ctx.template Alloc<T>(&transformed_x);
ctx.template Alloc<T>(&transformed_ddx);
ctx.template Alloc<T>(&transformed_dout);
// pad for input
const int rank = x.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_dout_channel,
pad_value,
&transformed_dout);
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddx_channel,
pad_value,
&transformed_ddx);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddx_channel,
pad_value,
&transformed_ddx);
} break;
default:
PADDLE_THROW(errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_x = transformed_x_channel;
transformed_dout = transformed_dout_channel;
transformed_ddx = transformed_ddx_channel;
if (paddings_.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings_[2 * i];
}
}
}
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + out_vec[i + 2];
axes[i] = i + 2;
}
std::vector<int> transformed_out_vec = out_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_out_vec[i + 2] =
out_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1];
}
if (!is_sys_pad) {
transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
ctx.template Alloc<T>(&transformed_ddout_channel);
} else {
ctx.template Alloc<T>(ddout);
transformed_ddout_channel = *ddout;
transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
}
const T* x_ = transformed_x.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
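  // With cuDNN 7+ (or MIOpen), the group count is carried by the convolution
  // descriptor (c_group), so `groups` is reset to 1 and the per-group loops
  // below collapse to a single iteration.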
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
paddle::operators::ConvArgs args1{&transformed_ddout_channel,
&filter,
&transformed_ddx,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args2{&transformed_ddout_channel,
&ddfilter,
&transformed_x,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args3{&transformed_dout,
dfilter,
&transformed_ddx_channel,
strides,
padding_common,
dilations_,
dtype};
paddle::operators::ConvArgs args4{&transformed_dout,
&ddfilter,
&transformed_dx_channel,
strides,
padding_common,
dilations_,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> bwd_result1;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> bwd_result2;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result;
#else
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> bwd_result1;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> bwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result;
#endif
auto layout = paddle::platform::GetCudnnTensorFormat(GPUDNNDataLayout::kNCHW);
// ddo = conv(ddI, filter) + conv(I, ddfilter)
size_t workspace_size = 0;
T* transformed_ddout_channel_ = nullptr;
if (ddout) {
ddout_ = ddout->data<T>();
transformed_ddout_channel_ = transformed_ddout_channel.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_ddout_channel, iwo_group);
args1.wdesc.set(filter, layout, iwo_group);
args1.odesc.set(transformed_ddx, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
bwd_result1.algo =
search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
bwd_result1 = search1::Find<T>(args1, false, deterministic, ctx);
workspace_size = search1::GetWorkspaceSize(args1, bwd_result1.algo);
#endif
ddfilter_ = ddfilter.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_ddout_channel, iwo_group);
args2.wdesc.set(ddfilter, layout, iwo_group);
args2.odesc.set(transformed_x, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2));
bwd_result2.algo =
search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
bwd_result2 = search2::Find<T>(args2, false, deterministic, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, bwd_result2.algo));
#endif
}
if (dfilter) {
dfilter_ = dfilter->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_dout, iwo_group);
args3.wdesc.set(*dfilter, layout, iwo_group);
args3.odesc.set(transformed_ddx_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo =
search3::Find<T>(args3, false, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result = search3::Find<T>(args3, false, deterministic, ctx);
workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (dx) {
transformed_dx_ = transformed_dx_channel.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dout, iwo_group);
args4.wdesc.set(ddfilter, layout, iwo_group);
args4.odesc.set(transformed_dx_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations_,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
fwd_result.algo =
search4::Find<T>(args4, false, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result = search4::Find<T>(args4, false, deterministic, ctx);
workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, fwd_result.algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
paddle::operators::GetNCDHW(transformed_x.dims(),
GPUDNNDataLayout::kNCHW,
&i_n,
&i_c,
&i_d,
&i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
paddle::operators::GetNCDHW(transformed_dout.dims(),
GPUDNNDataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in =
transformed_x.numel() / transformed_x.dims()[0] / groups;
int group_offset_out =
transformed_dout.numel() / transformed_dout.dims()[0] / groups;
int group_offset_filter = filter.numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddout) {
ddx_ = transformed_ddx.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args1.odesc.desc(),
ddx_ + i * group_offset_in,
args1.wdesc.desc(),
filter_ + i * group_offset_filter,
args1.cdesc.desc(),
bwd_result1.algo,
&beta,
args1.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args1.wdesc.desc(),
filter_ + i * group_offset_filter,
args1.odesc.desc(),
ddx_ + i * group_offset_in,
args1.cdesc.desc(),
bwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      // MIOPEN only supports beta == 0.0f, so the result is computed into a
      // temporary tensor and accumulated with miopenOpTensor below.
DenseTensor conv_x_ddfilter(dout.type());
conv_x_ddfilter.Resize(transformed_ddout_channel.dims());
T* conv_x_ddfilter_data = ctx.template Alloc<T>(&conv_x_ddfilter);
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args2.odesc.desc(),
x_ + i * group_offset_in,
args2.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args2.cdesc.desc(),
bwd_result2.algo,
&beta,
args2.idesc.desc(),
conv_x_ddfilter_data + i * group_offset_out,
workspace_ptr,
workspace_size));
},
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenOpTensor(
handle,
miopenTensorOpAdd,
&alpha,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out,
&alpha,
args2.idesc.desc(),
conv_x_ddfilter_data + i * group_offset_out,
&beta,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args2.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args2.odesc.desc(),
x_ + i * group_offset_in,
args2.cdesc.desc(),
bwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.idesc.desc(),
transformed_ddout_channel_ + i * group_offset_out));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
if ((!is_sys_pad) && (!channel_last)) {
if (strides.size() == 2U) {
funcs::Slice<Context, T, 4>(
ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
funcs::Slice<Context, T, 5>(
ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
}
} else if ((!is_sys_pad) && (channel_last)) {
if (strides.size() == 2U) {
funcs::Slice<Context, T, 4>(ctx,
&transformed_ddout_channel,
&transformed_ddout_channel,
starts,
ends,
axes);
} else if (!is_sys_pad && strides.size() == 3U) {
funcs::Slice<Context, T, 5>(ctx,
&transformed_ddout_channel,
&transformed_ddout_channel,
starts,
ends,
axes);
}
TransToChannelLast<Context, T>(ctx, &transformed_ddout_channel, ddout);
}
}
T* transformed_dout_channel_ = transformed_dout.data<T>();
if (dfilter) {
ddx_ = transformed_ddx_channel.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
ddx_ + i * group_offset_in,
args3.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dfilter_ + i * group_offset_filter,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args3.odesc.desc(),
ddx_ + i * group_offset_in,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dfilter_ + i * group_offset_filter));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
}
if (dx) {
ddfilter_ = ddfilter.data<T>();
for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionForward(
handle,
&alpha,
args4.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args4.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args4.cdesc.desc(),
fwd_result.algo,
&beta,
args4.odesc.desc(),
transformed_dx_ + i * group_offset_in,
workspace_ptr,
workspace_size));
},
workspace_size);
#else // PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionForward(
handle,
&alpha,
args4.idesc.desc(),
transformed_dout_channel_ + i * group_offset_out,
args4.wdesc.desc(),
ddfilter_ + i * group_offset_filter,
args4.cdesc.desc(),
fwd_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.odesc.desc(),
transformed_dx_ + i * group_offset_in));
},
workspace_size);
#endif // PADDLE_WITH_HIP
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dx_channel, dx);
}
}
}
template <typename T, typename Context>
void Conv3dTransposeGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& filter,
const DenseTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings_,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
x,
filter,
dout,
strides,
paddings_,
padding_algorithm,
groups,
dilations_,
data_format,
dx,
dfilter);
}
} // namespace phi
using float16 = phi::dtype::float16;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
PD_REGISTER_KERNEL(conv2d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeDoubleGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3dTransposeGradGPUDNNKernel,
float,
float16) {}
#else
PD_REGISTER_KERNEL(conv2d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv2dTransposeDoubleGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3dTransposeGradGPUDNNKernel,
float,
double,
float16) {}
#endif
|
a34dfc70cd7209bf54751389252df8ee74e5efd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int n) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate matrixTranspose kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
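    // Note: indices use threadIdx only, so this assumes a single thread block
    // whose dimensions cover the whole n x n matrix.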
int i = threadIdx.x;
int j = threadIdx.y;
if(i<n&&j<n)
T_d[i+j*n] = A_d[j+i*n];
} | a34dfc70cd7209bf54751389252df8ee74e5efd3.cu | #include "includes.h"
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int n) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate matrixTranspose kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
int i = threadIdx.x;
int j = threadIdx.y;
if(i<n&&j<n)
T_d[i+j*n] = A_d[j+i*n];
} |
sgd_op.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/optimizers/sgd_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/amp_type_traits.h"
namespace paddle {
namespace operators {
namespace {
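// Element-wise SGD update. When master_param/master_param_out are non-null the
// update is computed in the higher-precision type MT (master weights) and the
// low-precision parameter T is written back as a cast of that result.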
template <typename T, typename MT>
__global__ void SGDKernelMT(const T* param,
const T* grad,
const T* learning_rate,
const int num,
T* param_out,
const MT* master_param,
MT* master_param_out) {
MT lr = static_cast<MT>(learning_rate[0]);
CUDA_KERNEL_LOOP(i, num) {
MT p_data = master_param ? master_param[i] : static_cast<MT>(param[i]);
MT g_data = static_cast<MT>(grad[i]);
p_data = p_data - lr * g_data;
param_out[i] = static_cast<T>(p_data);
if (master_param_out) {
master_param_out[i] = p_data;
}
}
}
template <typename T>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate,
T* tensor_out,
int64_t row_numel,
int64_t limit) {
for (int64_t i = blockIdx.x; i < limit; i += gridDim.x) {
const T* selected_rows_ptr = selected_rows + i * row_numel;
T* tensor_out_ptr = tensor_out + rows[i] * row_numel;
for (int64_t index = threadIdx.x; index < row_numel; index += blockDim.x) {
      // Since indices in the rows of SelectedRows can be duplicated, we have to
      // use an atomic operation to avoid concurrent write errors.
phi::CudaAtomicAdd(
tensor_out_ptr + index,
-static_cast<T>(1.0) * learning_rate[0] * selected_rows_ptr[index]);
}
}
}
} // namespace
template <typename T>
class SGDOpKernel<phi::GPUContext, T> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<phi::DenseTensor>(),
true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be phi::DenseTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
paddle::framework::ToTypeName(param_var->Type())));
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
auto* param = ctx.Input<phi::DenseTensor>("Param");
auto* param_out = ctx.Output<phi::DenseTensor>("ParamOut");
auto* learning_rate = ctx.Input<phi::DenseTensor>("LearningRate");
auto* grad_var = ctx.InputVar("Grad");
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const phi::DenseTensor* master_param = nullptr;
phi::DenseTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master,
true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<phi::DenseTensor>("MasterParam");
master_param_out = ctx.Output<phi::DenseTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
// Actually, all tensors are phi::DenseTensor except SelectedRows.
if (grad_var->IsType<phi::DenseTensor>()) {
auto* grad = ctx.Input<phi::DenseTensor>("Grad");
int block = 512;
int grid = (param->numel() + block - 1) / block;
hipLaunchKernelGGL(( SGDKernelMT<T, MPDType>)
, dim3(grid), dim3(block), 0, ctx.cuda_device_context().stream(),
param->data<T>(),
grad->data<T>(),
learning_rate->data<T>(),
param->numel(),
param_out->mutable_data<T>(ctx.GetPlace()),
master_in_data,
master_out_data);
} else if (grad_var->IsType<phi::SelectedRows>()) {
      // TODO(qijun): In the sparse SGD operator, the in-place update is enforced.
      // This manual optimization makes data dependencies harder to track.
      // It would be better to find a more elegant solution.
PADDLE_ENFORCE_EQ(
param,
param_out,
platform::errors::InvalidArgument(
"The input tensor Param of SgdOp should be equal with ParamOut "
"if variable's type is SelectedRows."));
auto* grad = ctx.Input<phi::SelectedRows>("Grad");
auto in_height = grad->height();
auto out_dims = param_out->dims();
PADDLE_ENFORCE_EQ(in_height,
out_dims[0],
platform::errors::InvalidArgument(
"The input tensor Grad's height of SgdOp should be "
"equal with ParamOut's dims. But received Grad's "
"height [%s] and ParamOut's dims [%s]",
in_height,
out_dims[0]));
auto& in_value = grad->value();
auto& in_rows = grad->rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel,
param_out->numel() / in_height,
platform::errors::InvalidArgument(
"The in_row_numel of SgdOp should be equal with "
"param_out's numel / in_height."));
auto* in_data = in_value.data<T>();
auto* out_data = param_out->data<T>();
const int kThreadsPerBlock = 256;
int thread_x = kThreadsPerBlock;
int max_threads = ctx.cuda_device_context().GetMaxPhysicalThreadCount();
int max_blocks = ::max(max_threads / kThreadsPerBlock, 1);
phi::MixVector<int64_t> mixv_in_rows(&in_rows);
hipLaunchKernelGGL(( SparseSGDFunctorKernel), dim3(max_blocks),
dim3(thread_x),
0,
ctx.cuda_device_context().stream(),
in_data,
mixv_in_rows.CUDAData(ctx.GetPlace()),
learning_rate->data<T>(),
out_data,
in_row_numel,
in_rows.size());
} else {
PADDLE_ENFORCE_EQ(false,
true,
platform::errors::PermissionDenied(
"Unsupported Variable Type of Grad "
"in SgdOp. Excepted LodTensor or "
"SelectedRows, But received [%s]",
paddle::framework::ToTypeName(grad_var->Type())));
}
}
};
} // namespace operators
} // namespace paddle
| sgd_op.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/optimizers/sgd_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/amp_type_traits.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, typename MT>
__global__ void SGDKernelMT(const T* param,
const T* grad,
const T* learning_rate,
const int num,
T* param_out,
const MT* master_param,
MT* master_param_out) {
MT lr = static_cast<MT>(learning_rate[0]);
CUDA_KERNEL_LOOP(i, num) {
MT p_data = master_param ? master_param[i] : static_cast<MT>(param[i]);
MT g_data = static_cast<MT>(grad[i]);
p_data = p_data - lr * g_data;
param_out[i] = static_cast<T>(p_data);
if (master_param_out) {
master_param_out[i] = p_data;
}
}
}
template <typename T>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
const int64_t* rows,
const T* learning_rate,
T* tensor_out,
int64_t row_numel,
int64_t limit) {
for (int64_t i = blockIdx.x; i < limit; i += gridDim.x) {
const T* selected_rows_ptr = selected_rows + i * row_numel;
T* tensor_out_ptr = tensor_out + rows[i] * row_numel;
for (int64_t index = threadIdx.x; index < row_numel; index += blockDim.x) {
      // Since indices in the rows of SelectedRows can be duplicated, we have to
      // use an atomic operation to avoid concurrent write errors.
phi::CudaAtomicAdd(
tensor_out_ptr + index,
-static_cast<T>(1.0) * learning_rate[0] * selected_rows_ptr[index]);
}
}
}
} // namespace
template <typename T>
class SGDOpKernel<phi::GPUContext, T> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE_EQ(param_var->IsType<phi::DenseTensor>(),
true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be phi::DenseTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
paddle::framework::ToTypeName(param_var->Type())));
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
auto* param = ctx.Input<phi::DenseTensor>("Param");
auto* param_out = ctx.Output<phi::DenseTensor>("ParamOut");
auto* learning_rate = ctx.Input<phi::DenseTensor>("LearningRate");
auto* grad_var = ctx.InputVar("Grad");
const bool multi_precision = ctx.Attr<bool>("multi_precision");
const phi::DenseTensor* master_param = nullptr;
phi::DenseTensor* master_param_out = nullptr;
if (multi_precision) {
bool has_master =
ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut");
PADDLE_ENFORCE_EQ(has_master,
true,
platform::errors::InvalidArgument(
"The Input(MasterParam) and Output(MasterParamOut) "
"should not be null when "
"the attr `multi_precision` is true"));
master_param = ctx.Input<phi::DenseTensor>("MasterParam");
master_param_out = ctx.Output<phi::DenseTensor>("MasterParamOut");
}
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision
? master_param_out->mutable_data<MPDType>(ctx.GetPlace())
: nullptr;
// Actually, all tensors are phi::DenseTensor except SelectedRows.
if (grad_var->IsType<phi::DenseTensor>()) {
auto* grad = ctx.Input<phi::DenseTensor>("Grad");
int block = 512;
int grid = (param->numel() + block - 1) / block;
SGDKernelMT<T, MPDType>
<<<grid, block, 0, ctx.cuda_device_context().stream()>>>(
param->data<T>(),
grad->data<T>(),
learning_rate->data<T>(),
param->numel(),
param_out->mutable_data<T>(ctx.GetPlace()),
master_in_data,
master_out_data);
} else if (grad_var->IsType<phi::SelectedRows>()) {
      // TODO(qijun): In the sparse SGD operator, the in-place update is enforced.
      // This manual optimization makes data dependencies harder to track.
      // It would be better to find a more elegant solution.
PADDLE_ENFORCE_EQ(
param,
param_out,
platform::errors::InvalidArgument(
"The input tensor Param of SgdOp should be equal with ParamOut "
"if variable's type is SelectedRows."));
auto* grad = ctx.Input<phi::SelectedRows>("Grad");
auto in_height = grad->height();
auto out_dims = param_out->dims();
PADDLE_ENFORCE_EQ(in_height,
out_dims[0],
platform::errors::InvalidArgument(
"The input tensor Grad's height of SgdOp should be "
"equal with ParamOut's dims. But received Grad's "
"height [%s] and ParamOut's dims [%s]",
in_height,
out_dims[0]));
auto& in_value = grad->value();
auto& in_rows = grad->rows();
int64_t in_row_numel = in_value.numel() / in_rows.size();
PADDLE_ENFORCE_EQ(in_row_numel,
param_out->numel() / in_height,
platform::errors::InvalidArgument(
"The in_row_numel of SgdOp should be equal with "
"param_out's numel / in_height."));
auto* in_data = in_value.data<T>();
auto* out_data = param_out->data<T>();
const int kThreadsPerBlock = 256;
int thread_x = kThreadsPerBlock;
int max_threads = ctx.cuda_device_context().GetMaxPhysicalThreadCount();
int max_blocks = std::max(max_threads / kThreadsPerBlock, 1);
phi::MixVector<int64_t> mixv_in_rows(&in_rows);
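      // Blocks stride over the selected rows and threads stride over each row's
      // elements, accumulating into param_out with atomic adds (row indices may
      // repeat).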
SparseSGDFunctorKernel<<<max_blocks,
thread_x,
0,
ctx.cuda_device_context().stream()>>>(
in_data,
mixv_in_rows.CUDAData(ctx.GetPlace()),
learning_rate->data<T>(),
out_data,
in_row_numel,
in_rows.size());
} else {
PADDLE_ENFORCE_EQ(false,
true,
platform::errors::PermissionDenied(
"Unsupported Variable Type of Grad "
"in SgdOp. Excepted LodTensor or "
"SelectedRows, But received [%s]",
paddle::framework::ToTypeName(grad_var->Type())));
}
}
};
} // namespace operators
} // namespace paddle
|
620e1c59889a4aa1c9cdc98107440984737d07b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <algorithm>
#include <utility>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <ctime>
#define HANDLE_ERROR(x) if((x)!=hipSuccess){std::cout<<hipGetErrorString((x))<<std::endl;exit(-1);}
/****************CODE STARTS HERE*******************/
__device__ int satisfies(int i, int j, int *A, int *B)
{
return (A[i] <= B[j]);
}
__global__ void MergePath(int *A, int *B, int* C, int *x, int *y, int n)
{
int num_of_threads = blockDim.x;
int idx = threadIdx.x;
bool flag = false;
if (idx == 0)
{
x[idx] = 0;
y[idx] = 0;
flag = true;
}
int A_start = idx*(2 * n) / num_of_threads; //only when len(A)==len(B)
int B_start = max(0, A_start - (n - 1));
A_start = min(n - 1, A_start);
int length_of_array;
if (B_start == 0)
{
length_of_array = A_start + 1;
}
else
length_of_array = n - B_start;
int left = 0, right = length_of_array - 1;
// cout<<A_start<<" "<<B_start<<" "<<length_of_array<<endl<<"-------------------------------------------\n";
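	// Binary search along this thread's cross-diagonal of the (A, B) grid to find
	// the merge-path split point (x[idx], y[idx]) where this thread's output begins.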
while (left <= right && !flag)
{
// cout<<left<<" "<<right<<endl;
int mid = left + (right - left) / 2;
int I = A_start - mid;
int J = B_start + mid;
if (!satisfies(I, J, A, B))
{
left = mid + 1;
}
else
{
if (J == 0)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else if (I == n - 1)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
if (!satisfies(I + 1, J - 1, A, B))
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
right = mid;
}
}
}
}
left--;
if (!flag)
{
x[idx] = (A_start - left);
y[idx] = (n);
}
__syncthreads();
int end_x, end_y;
if (idx == num_of_threads - 1)
{
end_x = n;
end_y = n;
}
else
{
end_x = x[idx + 1];
end_y = y[idx + 1];
}
int cur_x = x[idx];
int cur_y = y[idx];
int put_at = cur_x + cur_y;
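	// Sequentially merge this thread's slices of A and B into C, starting at put_at.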
while (cur_x<end_x && cur_y<end_y)
{
if (A[cur_x] <= B[cur_y])
{
C[put_at++] = A[cur_x++];
}
else
{
C[put_at++] = B[cur_y++];
}
}
while (cur_x<end_x)
C[put_at++] = A[cur_x++];
while (cur_y<end_y)
C[put_at++] = B[cur_y++];
}
void printArr(int *C,int N)
{
FILE *g = fopen("g.txt", "w+");
for (int i = 0; i < N; i++)
{
//std::cout << C[i] << " ";
fprintf(g, "%d ", C[i]);
}
//std::cout << std::endl;
}
int main()
{
clock_t st, en;
st = clock();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int n;
FILE *f = fopen("f.txt", "r");
if (!f)
{
std::cout << "Error\n";
exit(-1);
}
fscanf(f,"%d", &n);
printf("%d\n",n);
//std::cin >> n;
int *C = (int*)malloc(sizeof(int)* 2 * n);
int *A = (int*)malloc(sizeof(int)*n);
int *B = (int*)malloc(sizeof(int)*n);
for (int i = 0; i<n; i++)
{
//std::cin >> A[i];
fscanf(f,"%d", A + i);
}
for (int i = 0; i<n; i++)
{
//std::cin >> B[i];
fscanf(f,"%d", B + i);
}
std::sort(A, A + n);
std::sort(B, B + n);
/*
for (int i = 0; i < n; i++)
std::cout << A[i] << " ";
std::cout << std::endl;
for (int i = 0; i < n; i++)
std::cout << B[i] << " ";
*/
int num_of_threads;
//std::cin >> num_of_threads;
fscanf(f, "%d", &num_of_threads);
printf("%d\n", num_of_threads);
int *d_x,*d_y,*d_A,*d_B,*d_C;
HANDLE_ERROR(hipMalloc((void**)&d_x,sizeof(int)*num_of_threads));
HANDLE_ERROR(hipMalloc((void**)&d_y, sizeof(int)*num_of_threads));
HANDLE_ERROR(hipMalloc((void**)&d_A, n*sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_B, n*sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_C, 2 * n*sizeof(int)));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_A, A, n*sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_B, B, n*sizeof(int), hipMemcpyHostToDevice));
hipEventRecord(start, 0);
MergePath << <1, num_of_threads >> >(d_A, d_B, d_C, d_x, d_y, n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop); // that's our time!
// Clean up:
hipEventDestroy(start);
hipEventDestroy(stop);
printf("%f\n", elapsedTime/1000);
HANDLE_ERROR(hipMemcpy(C, d_C, 2 * n*sizeof(int), hipMemcpyDeviceToHost));
printArr(C,2*n);
HANDLE_ERROR(hipFree(d_x));
HANDLE_ERROR(hipFree(d_A));
HANDLE_ERROR(hipFree(d_B));
HANDLE_ERROR(hipFree(d_C));
HANDLE_ERROR(hipFree(d_y));
	en = clock();
	float elp = (float)(en - st) / CLOCKS_PER_SEC;
printf("%.10f\n", elp);
return 0;
} | 620e1c59889a4aa1c9cdc98107440984737d07b3.cu | #include <iostream>
#include <vector>
#include <algorithm>
#include <utility>
#include <cuda.h>
#include <cstdio>
#include <ctime>
#define HANDLE_ERROR(x) if((x)!=cudaSuccess){std::cout<<cudaGetErrorString((x))<<std::endl;exit(-1);}
/****************CODE STARTS HERE*******************/
__device__ int satisfies(int i, int j, int *A, int *B)
{
return (A[i] <= B[j]);
}
__global__ void MergePath(int *A, int *B, int* C, int *x, int *y, int n)
{
int num_of_threads = blockDim.x;
int idx = threadIdx.x;
bool flag = false;
if (idx == 0)
{
x[idx] = 0;
y[idx] = 0;
flag = true;
}
int A_start = idx*(2 * n) / num_of_threads; //only when len(A)==len(B)
int B_start = max(0, A_start - (n - 1));
A_start = min(n - 1, A_start);
int length_of_array;
if (B_start == 0)
{
length_of_array = A_start + 1;
}
else
length_of_array = n - B_start;
int left = 0, right = length_of_array - 1;
// cout<<A_start<<" "<<B_start<<" "<<length_of_array<<endl<<"-------------------------------------------\n";
while (left <= right && !flag)
{
// cout<<left<<" "<<right<<endl;
int mid = left + (right - left) / 2;
int I = A_start - mid;
int J = B_start + mid;
if (!satisfies(I, J, A, B))
{
left = mid + 1;
}
else
{
if (J == 0)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else if (I == n - 1)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
if (!satisfies(I + 1, J - 1, A, B))
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
right = mid;
}
}
}
}
left--;
if (!flag)
{
x[idx] = (A_start - left);
y[idx] = (n);
}
__syncthreads();
int end_x, end_y;
if (idx == num_of_threads - 1)
{
end_x = n;
end_y = n;
}
else
{
end_x = x[idx + 1];
end_y = y[idx + 1];
}
int cur_x = x[idx];
int cur_y = y[idx];
int put_at = cur_x + cur_y;
while (cur_x<end_x && cur_y<end_y)
{
if (A[cur_x] <= B[cur_y])
{
C[put_at++] = A[cur_x++];
}
else
{
C[put_at++] = B[cur_y++];
}
}
while (cur_x<end_x)
C[put_at++] = A[cur_x++];
while (cur_y<end_y)
C[put_at++] = B[cur_y++];
}
void printArr(int *C,int N)
{
FILE *g = fopen("g.txt", "w+");
for (int i = 0; i < N; i++)
{
//std::cout << C[i] << " ";
fprintf(g, "%d ", C[i]);
}
//std::cout << std::endl;
}
int main()
{
clock_t st, en;
st = clock();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int n;
FILE *f = fopen("f.txt", "r");
if (!f)
{
std::cout << "Error\n";
exit(-1);
}
fscanf(f,"%d", &n);
printf("%d\n",n);
//std::cin >> n;
int *C = (int*)malloc(sizeof(int)* 2 * n);
int *A = (int*)malloc(sizeof(int)*n);
int *B = (int*)malloc(sizeof(int)*n);
for (int i = 0; i<n; i++)
{
//std::cin >> A[i];
fscanf(f,"%d", A + i);
}
for (int i = 0; i<n; i++)
{
//std::cin >> B[i];
fscanf(f,"%d", B + i);
}
std::sort(A, A + n);
std::sort(B, B + n);
/*
for (int i = 0; i < n; i++)
std::cout << A[i] << " ";
std::cout << std::endl;
for (int i = 0; i < n; i++)
std::cout << B[i] << " ";
*/
int num_of_threads;
//std::cin >> num_of_threads;
fscanf(f, "%d", &num_of_threads);
printf("%d\n", num_of_threads);
int *d_x,*d_y,*d_A,*d_B,*d_C;
HANDLE_ERROR(cudaMalloc((void**)&d_x,sizeof(int)*num_of_threads));
HANDLE_ERROR(cudaMalloc((void**)&d_y, sizeof(int)*num_of_threads));
HANDLE_ERROR(cudaMalloc((void**)&d_A, n*sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_B, n*sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_C, 2 * n*sizeof(int)));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_A, A, n*sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_B, B, n*sizeof(int), cudaMemcpyHostToDevice));
cudaEventRecord(start, 0);
MergePath << <1, num_of_threads >> >(d_A, d_B, d_C, d_x, d_y, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop); // that's our time!
// Clean up:
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("%f\n", elapsedTime/1000);
HANDLE_ERROR(cudaMemcpy(C, d_C, 2 * n*sizeof(int), cudaMemcpyDeviceToHost));
printArr(C,2*n);
HANDLE_ERROR(cudaFree(d_x));
HANDLE_ERROR(cudaFree(d_A));
HANDLE_ERROR(cudaFree(d_B));
HANDLE_ERROR(cudaFree(d_C));
HANDLE_ERROR(cudaFree(d_y));
	en = clock();
	float elp = (float)(en - st) / CLOCKS_PER_SEC;
printf("%.10f\n", elp);
return 0;
} |
ceee72feff44a29a5b18dfaaad4886a17c82b272.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error: %s: %d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
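/*
 * dotp: one thread per point.  Each point carries 20 components stored with a
 * stride of BLOCK_SIZE (32) inside its block's chunk of A/B, so the unrolled
 * loads below are coalesced across the warp.  BLOCK_SIZE is hard-coded and
 * must match the launch configuration (T = 32 in dot()).
 */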
extern "C" __global__ void dotp(double* A, double* B, double* out){
int bx = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int tx = threadIdx.x;
int BLOCK_SIZE = 32;
double a[20], b[20];
/*** Global A,B -> Register a,b ***/
#pragma unroll
for(int i = 0; i < 20; i++){
a[i] = A[20*BLOCK_SIZE*bx + tx + BLOCK_SIZE*i];
}
#pragma unroll
for(int i = 0; i < 20; i++){
b[i] = B[20*BLOCK_SIZE*bx + tx + BLOCK_SIZE*i];
}
    /**** dot product ***/
double o = 0;
for(int i = 0; i < 20; ++i){
o += a[i]*b[i];
}
/**** Register o -> Global out ***/
out[bx*BLOCK_SIZE + tx] = o;
}
extern "C" void dot(int size){
int N = size;
int T = 32;
// int T = atoi(argv[2]);
double * A, *B, *out;
A = (double*)malloc( N*N*N*20*sizeof(double));
B = (double*)malloc( N*N*N*20*sizeof(double) );
out = (double*)malloc( N*N*N*sizeof(double));
// initialize
for(int i = 0; i < N*N*N; ++i){
for(int j = 0; j < 20; ++j){
A[i*20 + j] = 100*i+j;
B[i*20 + j] = 1000*i+j;
}
out[i] = 0.0;
}
double *dA, *dB, *dout;
hipMalloc( (void**)&dA, N*N*N*20*sizeof(double));
hipMalloc( (void**)&dB, N*N*N*20*sizeof(double));
hipMalloc( (void**)&dout, N*N*N*sizeof(double));
    hipMemcpy(dA, A, N*N*N*20*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dB, B, N*N*N*20*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dout, out, N*N*N*sizeof(double), hipMemcpyHostToDevice);
dim3 grid(N*N*N/T);
dim3 block(T);
// StartTimer();
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( dotp), dim3(grid),dim3(block), 0, 0, dA,dB,dout);
CHECK(hipDeviceSynchronize());
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
// double time = GetTimer(); // [ms]
double flops = 39*N*N*N / (time * 1e-3); // Flop/sec
printf("%d^3: time %f[ms], flops %f [GFlops]\n", N, time, flops * 1e-9);
hipMemcpy(A, dA, N*N*N*20*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(B, dB, N*N*N*20*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(out, dout, N*N*N*sizeof(double), hipMemcpyDeviceToHost);
free(out);
free(A);
free(B);
hipFree(dout);
hipFree(dA);
hipFree(dB);
}
| ceee72feff44a29a5b18dfaaad4886a17c82b272.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s: %d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
extern "C" __global__ void dotp(double* A, double* B, double* out){
int bx = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int tx = threadIdx.x;
int BLOCK_SIZE = 32;
double a[20], b[20];
/*** Global A,B -> Register a,b ***/
#pragma unroll
for(int i = 0; i < 20; i++){
a[i] = A[20*BLOCK_SIZE*bx + tx + BLOCK_SIZE*i];
}
#pragma unroll
for(int i = 0; i < 20; i++){
b[i] = B[20*BLOCK_SIZE*bx + tx + BLOCK_SIZE*i];
}
    /**** dot product ***/
double o = 0;
for(int i = 0; i < 20; ++i){
o += a[i]*b[i];
}
/**** Register o -> Global out ***/
out[bx*BLOCK_SIZE + tx] = o;
}
extern "C" void dot(int size){
int N = size;
int T = 32;
// int T = atoi(argv[2]);
double * A, *B, *out;
A = (double*)malloc( N*N*N*20*sizeof(double));
B = (double*)malloc( N*N*N*20*sizeof(double) );
out = (double*)malloc( N*N*N*sizeof(double));
// initialize
for(int i = 0; i < N*N*N; ++i){
for(int j = 0; j < 20; ++j){
A[i*20 + j] = 100*i+j;
B[i*20 + j] = 1000*i+j;
}
out[i] = 0.0;
}
double *dA, *dB, *dout;
cudaMalloc( (void**)&dA, N*N*N*20*sizeof(double));
cudaMalloc( (void**)&dB, N*N*N*20*sizeof(double));
cudaMalloc( (void**)&dout, N*N*N*sizeof(double));
    cudaMemcpy(dA, A, N*N*N*20*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, N*N*N*20*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dout, out, N*N*N*sizeof(double), cudaMemcpyHostToDevice);
dim3 grid(N*N*N/T);
dim3 block(T);
// StartTimer();
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
dotp<<<grid,block>>>(dA,dB,dout);
CHECK(cudaDeviceSynchronize());
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
// double time = GetTimer(); // [ms]
double flops = 39*N*N*N / (time * 1e-3); // Flop/sec
printf("%d^3: time %f[ms], flops %f [GFlops]\n", N, time, flops * 1e-9);
cudaMemcpy(A, dA, N*N*N*20*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(B, dB, N*N*N*20*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(out, dout, N*N*N*sizeof(double), cudaMemcpyDeviceToHost);
free(out);
free(A);
free(B);
cudaFree(dout);
cudaFree(dA);
cudaFree(dB);
}
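/*
 * Minimal calling sketch, assuming dot() is driven from a separate host
 * translation unit (dot() allocates and initializes all buffers itself; the
 * grid is N*N*N/32 blocks, so N*N*N should be a multiple of 32):
 *
 *   extern "C" void dot(int size);
 *   int main() { dot(64); return 0; }   // 64^3 points, 20 components each
 */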
|
328c46bc4b4a7fb5e2634b7c065130a80673d2a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "deltaCV/gpu/cudaImg.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <opencv2/opencv.hpp>
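/*
 * guassianBlur3: 3x3 Gaussian (binomial) blur with weights
 *     1 2 1
 *     2 4 2   (normalized by 16)
 *     1 2 1
 * One thread per pixel; the one-pixel image border is left unmodified by the
 * bounds check inside the kernel.
 */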
__global__ void guassianBlur3(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols)
{
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
{
dataOut[xdx + ydx * imgCols] =
(dataIn[(xdx-1)+(ydx-1)*imgCols] + dataIn[(xdx)+(ydx-1)*imgCols]*2 + dataIn[(xdx+1)+(ydx-1)*imgCols]+
dataIn[(xdx-1)+(ydx)*imgCols]*2 + dataIn[(xdx)+(ydx)*imgCols]*4 + dataIn[(xdx+1)+(ydx)*imgCols]*2 +
dataIn[(xdx-1)+(ydx+1)*imgCols] + dataIn[(xdx)+(ydx+1)*imgCols]*2 + dataIn[(xdx+1)+(ydx+1)*imgCols])/16;
}
}
//__global__ void guassianBlur5(hipTextureObject_t dataIn,
// unsigned char* dataOut,
// short int imgRows,
// short int imgCols)
//{
// int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
// int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
//
// dataOut[xdx+imgCols*ydx] =
// (tex2D(dataIn,xdx-2,ydx-2) + tex2D(dataIn,xdx-1,ydx-2)*4 + tex2D(dataIn,xdx,ydx-2)*7 + tex2D(dataIn,xdx+1,ydx-2)*4 + tex2D(dataIn,xdx+2,ydx-2) +
// tex2D(dataIn,xdx-2,ydx-1)*4 + tex2D(dataIn,xdx-1,ydx-1)*16 + tex2D(dataIn,xdx,ydx-1)*26 + tex2D(dataIn,xdx+1,ydx-1)*16 + tex2D(dataIn,xdx+2,ydx-1)*4 +
// tex2D(dataIn,xdx-2,ydx)*7 + tex2D(dataIn,xdx-1,ydx)*26 + tex2D(dataIn,xdx,ydx)*41 + tex2D(dataIn,xdx+1,ydx)*26 + tex2D(dataIn,xdx+2,ydx)*7 +
// tex2D(dataIn,xdx-2,ydx+1)*4 + tex2D(dataIn,xdx-1,ydx+1)*16 + tex2D(dataIn,xdx,ydx+1)*26 + tex2D(dataIn,xdx+1,ydx+1)*16 + tex2D(dataIn,xdx+2,ydx+1)*4 +
// tex2D(dataIn,xdx-2,ydx+2) + tex2D(dataIn,xdx-1,ydx+2)*4 + tex2D(dataIn,xdx,ydx+2)*7 + tex2D(dataIn,xdx+1,ydx+2)*4 + tex2D(dataIn,xdx+2,ydx+2))/273;
//
//}
void guassianBlur3_gpu(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
dim3 tPerBlock,
dim3 bPerGrid)
{
hipLaunchKernelGGL(( guassianBlur3), dim3(bPerGrid),dim3(tPerBlock), 0, 0, dataIn,dataOut,imgRows,imgCols);
}
| 328c46bc4b4a7fb5e2634b7c065130a80673d2a0.cu | #include "deltaCV/gpu/cudaImg.cuh"
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <opencv2/opencv.hpp>
__global__ void guassianBlur3(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols)
{
int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
{
dataOut[xdx + ydx * imgCols] =
(dataIn[(xdx-1)+(ydx-1)*imgCols] + dataIn[(xdx)+(ydx-1)*imgCols]*2 + dataIn[(xdx+1)+(ydx-1)*imgCols]+
dataIn[(xdx-1)+(ydx)*imgCols]*2 + dataIn[(xdx)+(ydx)*imgCols]*4 + dataIn[(xdx+1)+(ydx)*imgCols]*2 +
dataIn[(xdx-1)+(ydx+1)*imgCols] + dataIn[(xdx)+(ydx+1)*imgCols]*2 + dataIn[(xdx+1)+(ydx+1)*imgCols])/16;
}
}
//__global__ void guassianBlur5(cudaTextureObject_t dataIn,
// unsigned char* dataOut,
// short int imgRows,
// short int imgCols)
//{
// int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
// int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
//
// dataOut[xdx+imgCols*ydx] =
// (tex2D(dataIn,xdx-2,ydx-2) + tex2D(dataIn,xdx-1,ydx-2)*4 + tex2D(dataIn,xdx,ydx-2)*7 + tex2D(dataIn,xdx+1,ydx-2)*4 + tex2D(dataIn,xdx+2,ydx-2) +
// tex2D(dataIn,xdx-2,ydx-1)*4 + tex2D(dataIn,xdx-1,ydx-1)*16 + tex2D(dataIn,xdx,ydx-1)*26 + tex2D(dataIn,xdx+1,ydx-1)*16 + tex2D(dataIn,xdx+2,ydx-1)*4 +
// tex2D(dataIn,xdx-2,ydx)*7 + tex2D(dataIn,xdx-1,ydx)*26 + tex2D(dataIn,xdx,ydx)*41 + tex2D(dataIn,xdx+1,ydx)*26 + tex2D(dataIn,xdx+2,ydx)*7 +
// tex2D(dataIn,xdx-2,ydx+1)*4 + tex2D(dataIn,xdx-1,ydx+1)*16 + tex2D(dataIn,xdx,ydx+1)*26 + tex2D(dataIn,xdx+1,ydx+1)*16 + tex2D(dataIn,xdx+2,ydx+1)*4 +
// tex2D(dataIn,xdx-2,ydx+2) + tex2D(dataIn,xdx-1,ydx+2)*4 + tex2D(dataIn,xdx,ydx+2)*7 + tex2D(dataIn,xdx+1,ydx+2)*4 + tex2D(dataIn,xdx+2,ydx+2))/273;
//
//}
void guassianBlur3_gpu(unsigned char* dataIn,
unsigned char* dataOut,
short int imgRows,
short int imgCols,
dim3 tPerBlock,
dim3 bPerGrid)
{
guassianBlur3<<<bPerGrid,tPerBlock>>>(dataIn,dataOut,imgRows,imgCols);
}
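/*
 * Usage sketch for the wrapper above (host-side example; the input/output
 * device buffers are assumed to be provided by the surrounding deltaCV code):
 *
 *   // unsigned char *d_in, *d_out already cudaMalloc'd (rows * cols bytes)
 *   dim3 threads(32, 32);
 *   dim3 blocks((cols + 31) / 32, (rows + 31) / 32);
 *   guassianBlur3_gpu(d_in, d_out, rows, cols, threads, blocks);
 *   cudaDeviceSynchronize();
 */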
|
da08e6b178cbe97a2991c2f7088708729f39eda3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
/*__global__ void PointsMean(double *points, double *means, int rows)
{
int tid = threadIdx.x;
if (tid < 3){
for (int i = 0; i < rows; i++)
{
means[tid] += points[i * 3 + tid];
}
means[tid] /= rows;
}
}
__global__ void PointsDis(double* points,double *means, int rows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < 3)
{
points[i * 3 + j] -= means[j];
}
}
__global__ void PointsScale(double *points, double *cache, int rows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < rows)
{
temp[tempIndex] = pow(points[tid],2)/rows;
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void PointsNorm(double *points,double scale, int rows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < 3)
{
points[i * 3 + j] /= scale;
}
}*/
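/*
 * E-step kernels of this CPD-style (Coherent Point Drift) affine registration.
 * Probability1 fills the tarrows x sourows affinity matrix
 * p(i,j) = exp(-||target_i - result_j||^2 / (2*sigma^2)) (ksig = -2*sigma^2).
 * Probability2 adds the uniform outlier term to each row sum sp and derives
 * pt1 and the per-row negative log-likelihood.  Probability3 normalizes by sp
 * column-wise to produce P1 and PX, the sufficient statistics for the M-step.
 */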
__global__ void Probability1(double* target, double* source, double* p, double ksig, int tarrows, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < tarrows && j < sourows)
{
double razn = pow(target[3 * i] - source[3 * j], 2) + pow(target[3 * i + 1] - source[3 * j + 1], 2) +
pow(target[3 * i + 2] - source[3 * j + 2], 2);
p[i*sourows + j] = exp(razn / ksig);
}
}
__global__ void Probability2(double* p, double* sp, double* pt1, double*l, double outlier_tmp, int tarrows, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < tarrows)
{
sp[i] = 0;
for (int j = 0; j < sourows; j++)
{
sp[i] += p[i*sourows + j];
}
sp[i] += outlier_tmp;
pt1[i] = 1 - outlier_tmp / sp[i];
l[i] = -log(sp[i]);
}
}
__global__ void Probability3(double* target, double* p, double* sp, double* p1, double* px, int tarrows, int sourows)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < sourows)
{
double temp;
px[j * 3] = 0;
px[j * 3 + 1] = 0;
px[j * 3 + 2] = 0;
p1[j] = 0;
for (int i = 0; i < tarrows; i++)
{
temp = p[i*sourows + j] / sp[i];
p1[j] += temp;
px[j * 3] += target[i * 3] * temp;
px[j * 3 + 1] += target[i * 3 + 1] * temp;
px[j * 3 + 2] += target[i * 3 + 2] * temp;
}
}
}
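/*
 * Computel and ComputeNp below are shared-memory tree reductions: each block
 * of 1024 threads reduces its slice into cache[blockIdx.x] and the host sums
 * the per-block partials.  The halving loop assumes blockDim.x is a power of
 * two, which holds for the fixed <<<num, 1024>>> launches used here.
 */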
__global__ void Computel(double* temp_l, double* cache, int tarrows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < tarrows)
{
temp[tempIndex] = temp_l[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeNp(double* p1, double* cache, int sourows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < sourows)
{
temp[tempIndex] = p1[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeMu(double* target, double* source, double* p1, double* pt1, double *mu_x, double *mu_y, int tarrows, int sourows, double np)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < 6)
{
if (tid < 3)
{
int i = tid;
mu_x[i] = 0;
for (int dy = 0; dy < tarrows; dy++)
{
mu_x[i] += target[dy * 3 + i] * pt1[dy] / np;
}
}
else
{
int i = tid - 3;
mu_y[i] = 0;
for (int dy = 0; dy < sourows; dy++)
{
mu_y[i] += source[dy * 3 + i] * p1[dy] / np;
}
}
}
}
__global__ void ComputeB(double *b1, double *b2, double *px, double *p1,double *mu_x, double *mu_y, double *source, double np, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j < 6)
{
if (j < 3)
{
b1[i * 3 + j] = 0;
for (int dy = 0; dy < sourows; dy++)
{
b1[i * 3 + j] += px[dy * 3 + i] * source[dy * 3 + j];
}
b1[i * 3 + j] -= np * mu_x[i] * mu_y[j];
}
else
{
int x = i;
int y = j - 3;
b2[x * 3 + y] = 0;
for (int dy = 0; dy < sourows; dy++)
{
b2[x * 3 + y] += source[dy * 3 + x] * p1[dy] * source[dy * 3 + y];
}
b2[x * 3 + y] -= np * mu_y[x] * mu_y[y];
}
}
}
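/*
 * ComputeTrans inverts the 3x3 matrix B2 via the cofactor/adjugate formula
 * (the determinant is recomputed per thread), forms the affine transform
 * T = B1 * B2^{-1}, and the translation t = mu_x - T * mu_y.
 */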
__global__ void ComputeTrans(double *b1,double *b2, double *inv_b2, double *mu_x, double *mu_y, double *transform, double *translation)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j < 3)
{
double temp[4];
double det = b2[0] * b2[4] * b2[8] + b2[3] * b2[7] * b2[2] + b2[6] * b2[1] * b2[5]
- b2[0] * b2[7] * b2[5] - b2[3] * b2[1] * b2[8] - b2[6] * b2[4] * b2[2];
int tid = 0;
for (int x = 0; x < 3; x++){
if (x != j){
for (int y = 0; y < 3; y++){
if (y != i){
temp[tid] = b2[x * 3 + y];
tid++;
}
}
}
}
inv_b2[i * 3 + j] = pow(-1.0, i + j) * (temp[0] * temp[3] - temp[1] * temp[2]) / det;
__syncthreads();
transform[i * 3 + j] = b1[i * 3] * inv_b2[j] + b1[i * 3 + 1] * inv_b2[3 + j] + b1[i * 3 + 2] * inv_b2[6 + j];
__syncthreads();
if (j == 0)
{
translation[i] = mu_x[i] -
(transform[i * 3] * mu_y[0] + transform[i * 3 + 1] * mu_y[1] + transform[i * 3 + 2] * mu_y[2]);
}
}
}
__global__ void ComputeSigma1(double* target, double* pt1, double* cache, int tarrows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < tarrows)
{
temp[tempIndex] = (pow(target[tid * 3], 2) + pow(target[tid * 3 + 1], 2) + pow(target[tid * 3 + 2], 2))* pt1[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeSigma2(double *cache, double *mu_x, double *mu_y, double *b1, double *transform, double *sigma2, double np, int num)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0)
{
*sigma2 = 0;
for (int i = 0; i < num; i++)
{
*sigma2 += cache[i];
}
for (int i = 0; i < 3; i++)
{
*sigma2 += -np * mu_x[i] * mu_x[i] -
(b1[i * 3] * transform[i] + b1[i * 3 + 1] * transform[3 + i] + b1[i * 3 + 2] * transform[6 + i]);
}
*sigma2 /= np * 3;
}
}
__global__ void ComputeRes(double* source, double* transform, double* translation, double* result, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < sourows && j < 3)
{
result[i * 3 + j] = source[i * 3] * transform[j*3] + source[i * 3 + 1] * transform[j*3 + 1] + source[i * 3 + 2] * transform[j*3 + 2] + translation[j];
}
}
/*__global__ void Denormalize(double *result, double *target, double *transform, double *translation, double *tar_means, double *sou_means, double scale, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j == 0)
{
translation[i] = scale*translation[i] + tar_means[i];
for (int tid = 0; tid < 3; tid++)
{
translation[i] -= transform[i * 3 + tid] * sou_means[tid];
}
}
if (i < sourows && j < 3)
{
result[i * 3 + j] = result[i * 3 + j] * scale + tar_means[j];
}
}*/
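/*
 * Compute: host driver for the EM iteration.  Each pass runs the E-step
 * kernels (Probability1-3), finishes the log-likelihood and Np sums on the
 * host from the per-block partials, runs the M-step kernels (ComputeMu,
 * ComputeB, ComputeTrans, ComputeSigma1/2), and re-applies the transform with
 * ComputeRes.  The loop stops at max_iter, when the relative change of the
 * likelihood falls below tolerance, or when sigma2 collapses to near zero.
 */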
hipError_t Compute(myMatrix* target, myMatrix* source, double* result, double* transform, double* translation, double sigma2, double outliers, double tolerance, int max_iter)
{
//define the initial value
double ntol = tolerance + 10.0;
double l = 0.0;
int iter = 0;
double res_sigma2 = sigma2;
int tarrows = target->rows;
int sourows = source->rows;
dim3 tarnormblocks((tarrows + 31) / 32, 1);
dim3 tarnormthreads(32, 3);
dim3 sounormblocks((sourows + 31) / 32, 1);
dim3 sounormthreads(32, 3);
dim3 problocks((tarrows + 31) / 32, (sourows + 31) / 32);
dim3 prothreads(32, 32);
dim3 comblocks(1,1);
dim3 comthreads(3,6);
dim3 transblocks(1, 1);
dim3 transthreads(3, 3);
dim3 resblocks((sourows + 31) / 32,1);
dim3 resthreads(32, 3);
int num1 = (tarrows + 1023) / 1024;
int num2 = (sourows + 1023) / 1024;
//CPU memory allocation
double *cache_tar = (double*)malloc(num1* sizeof(double));
double *cache_sou = (double*)malloc(num2* sizeof(double));
//GPU memory allocation
hipError_t cudaStatus;
double *dev_target, *dev_source, *dev_result;
cudaStatus = hipMalloc((void**)&dev_target, tarrows*3*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_source, sourows*3*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_result, sourows*3*sizeof(double));
cudaStatus = hipMemcpy(dev_target, target->Mat, tarrows * 3 * sizeof(double), hipMemcpyHostToDevice);
cudaStatus = hipMemcpy(dev_source, source->Mat, sourows * 3 * sizeof(double), hipMemcpyHostToDevice);
double *dev_tar_means, *dev_sou_means;
cudaStatus = hipMalloc((void**)&dev_tar_means, 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_sou_means, 3 * sizeof(double));
double *dev_cache_tar, *dev_cache_sou, *dev_sigma2;
cudaStatus = hipMalloc((void**)&dev_cache_tar, num1* sizeof(double));
cudaStatus = hipMalloc((void**)&dev_cache_sou, num2* sizeof(double));
cudaStatus = hipMalloc((void**)&dev_sigma2, sizeof(double));
/*PointsMean << <1, 3 >> >(dev_target, dev_tar_means, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsDis << <tarnormblocks, tarnormthreads >> >(dev_target, dev_tar_means, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsScale << <num1, 1024 >> >(dev_target, dev_cache_tar, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(cache_tar, dev_cache_tar, num1 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
double scale_tar = 0;
for (int i = 0; i < num1; i++)
{
scale_tar += cache_tar[i];
}
PointsMean << <1, 3 >> >(dev_source, dev_sou_means, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsDis << <sounormblocks, sounormthreads >> >(dev_source, dev_sou_means, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsScale << <num2, 1024 >> >(dev_source, dev_cache_sou, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(cache_sou, dev_cache_sou, num2 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
double scale_sou = 0;
for (int i = 0; i < num1; i++)
{
scale_sou += cache_tar[i];
}
double scale = ::max(scale_tar, scale_sou);
PointsNorm << <tarnormblocks, tarnormthreads >> >(dev_target, scale, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsNorm << <sounormblocks, sounormthreads >> >(dev_source, scale, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}*/
cudaStatus = hipMemcpy(dev_result, source->Mat, sourows * 3 * sizeof(double), hipMemcpyHostToDevice);
double *dev_pt1, *dev_p1, *dev_px, *dev_l;
double *dev_p, *dev_sp;
cudaStatus = hipMalloc((void**)&dev_pt1, tarrows*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_p1, sourows*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_px, sourows*3*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_l, tarrows*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_p, tarrows*sourows*sizeof(double));
cudaStatus = hipMalloc((void**)&dev_sp, tarrows*sizeof(double));
double *dev_mu_x, *dev_mu_y;
double *dev_b1, *dev_b2, *dev_inv_b2;
double *dev_transform, *dev_translation;
cudaStatus = hipMalloc((void**)&dev_mu_x, 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_mu_y, 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_b1, 3 * 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_b2, 3 * 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_inv_b2, 3 * 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_transform, 3 * 3 * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_translation, 3 * sizeof(double));
	//loop calculation
while (iter < max_iter && ntol > tolerance &&
res_sigma2 > 10 * std::numeric_limits<double>::epsilon())
{
double ksig = -2.0 * res_sigma2;
int cols = target->cols;
double outlier_tmp =
(outliers * sourows * ::pow(-ksig * M_PI, 0.5 * cols)) /
((1 - outliers) * tarrows);
//compute p
Probability1 << <problocks, prothreads>> >(dev_target, dev_result, dev_p, ksig, tarrows, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute sp and pt1
Probability2 << <(tarrows + 31) / 32, 32 >> >(dev_p, dev_sp, dev_pt1, dev_l, outlier_tmp, tarrows, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute px and p1
Probability3 << <(sourows + 31) / 32, 32 >> >(dev_target, dev_p, dev_sp, dev_p1, dev_px, tarrows, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute l
Computel << <num1, 1024 >> >(dev_l, dev_cache_tar, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(cache_tar, dev_cache_tar, num1 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
double pro_l = 0;
for (int i = 0; i < num1; i++)
{
pro_l += cache_tar[i];
}
pro_l += cols * tarrows * ::log(res_sigma2) / 2;
ntol = std::abs((pro_l - l) / pro_l);
l = pro_l;
//compute np
hipLaunchKernelGGL(( ComputeNp), dim3(num2),dim3(1024), 0, 0, dev_p1, dev_cache_sou, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(cache_sou, dev_cache_sou, num2 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
double np = 0;
for (int i = 0; i < num2; i++)
{
np += cache_sou[i];
}
//compute mu_x and mu_y
ComputeMu<< <1, 6 >> >(dev_target, dev_source, dev_p1, dev_pt1, dev_mu_x, dev_mu_y, tarrows, sourows, np);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute b1 and b2
ComputeB<< <comblocks, comthreads >> >(dev_b1, dev_b2, dev_px, dev_p1, dev_mu_x, dev_mu_y, dev_source, np, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute transform and translation
ComputeTrans<< <transblocks, transthreads >> >(dev_b1, dev_b2, dev_inv_b2, dev_mu_x, dev_mu_y, dev_transform, dev_translation);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// compute sigma2
ComputeSigma1<< <num1, 1024 >> >(dev_target, dev_pt1, dev_cache_tar, tarrows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
ComputeSigma2<< <1, 1 >> >(dev_cache_tar, dev_mu_x, dev_mu_y, dev_b1, dev_transform, dev_sigma2, np, num1);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(&res_sigma2, dev_sigma2, sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//compute result points
ComputeRes<< <resblocks, resthreads >> >(dev_source, dev_transform, dev_translation, dev_result, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
++iter;
}
//print the iterations, ntol and sigma2
printf("iter = %d\n", iter);
printf("ntol = %f\n",ntol);
printf("res_sigma2 = %f\n",res_sigma2);
/*Denormalize << <resblocks, resthreads >> >(dev_result, dev_target, dev_transform, dev_translation, dev_tar_means, dev_sou_means, scale, sourows);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}*/
//return transform
cudaStatus = hipMemcpy(transform, dev_transform, 3 * 3 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//return translation
cudaStatus = hipMemcpy(translation, dev_translation, 3 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//return last result
cudaStatus = hipMemcpy(result, dev_result, sourows * 3 * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
printf("%s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//free the memory
Error:
free(cache_tar);
free(cache_sou);
hipFree(dev_source);
hipFree(dev_target);
hipFree(dev_pt1);
hipFree(dev_p1);
hipFree(dev_px);
hipFree(dev_l);
hipFree(dev_p);
hipFree(dev_sp);
hipFree(dev_result);
hipFree(dev_b1);
hipFree(dev_b2);
hipFree(dev_cache_tar);
hipFree(dev_inv_b2);
hipFree(dev_mu_x);
hipFree(dev_mu_y);
hipFree(dev_transform);
hipFree(dev_translation);
hipFree(dev_sigma2);
hipFree(dev_cache_sou);
hipFree(dev_tar_means);
hipFree(dev_sou_means);
return cudaStatus;
} | da08e6b178cbe97a2991c2f7088708729f39eda3.cu |
#include "kernel.cuh"
/*__global__ void PointsMean(double *points, double *means, int rows)
{
int tid = threadIdx.x;
if (tid < 3){
for (int i = 0; i < rows; i++)
{
means[tid] += points[i * 3 + tid];
}
means[tid] /= rows;
}
}
__global__ void PointsDis(double* points,double *means, int rows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < 3)
{
points[i * 3 + j] -= means[j];
}
}
__global__ void PointsScale(double *points, double *cache, int rows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < rows)
{
temp[tempIndex] = pow(points[tid],2)/rows;
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void PointsNorm(double *points,double scale, int rows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < 3)
{
points[i * 3 + j] /= scale;
}
}*/
__global__ void Probability1(double* target, double* source, double* p, double ksig, int tarrows, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < tarrows && j < sourows)
{
double razn = pow(target[3 * i] - source[3 * j], 2) + pow(target[3 * i + 1] - source[3 * j + 1], 2) +
pow(target[3 * i + 2] - source[3 * j + 2], 2);
p[i*sourows + j] = exp(razn / ksig);
}
}
__global__ void Probability2(double* p, double* sp, double* pt1, double*l, double outlier_tmp, int tarrows, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < tarrows)
{
sp[i] = 0;
for (int j = 0; j < sourows; j++)
{
sp[i] += p[i*sourows + j];
}
sp[i] += outlier_tmp;
pt1[i] = 1 - outlier_tmp / sp[i];
l[i] = -log(sp[i]);
}
}
__global__ void Probability3(double* target, double* p, double* sp, double* p1, double* px, int tarrows, int sourows)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < sourows)
{
double temp;
px[j * 3] = 0;
px[j * 3 + 1] = 0;
px[j * 3 + 2] = 0;
p1[j] = 0;
for (int i = 0; i < tarrows; i++)
{
temp = p[i*sourows + j] / sp[i];
p1[j] += temp;
px[j * 3] += target[i * 3] * temp;
px[j * 3 + 1] += target[i * 3 + 1] * temp;
px[j * 3 + 2] += target[i * 3 + 2] * temp;
}
}
}
__global__ void Computel(double* temp_l, double* cache, int tarrows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < tarrows)
{
temp[tempIndex] = temp_l[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeNp(double* p1, double* cache, int sourows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < sourows)
{
temp[tempIndex] = p1[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeMu(double* target, double* source, double* p1, double* pt1, double *mu_x, double *mu_y, int tarrows, int sourows, double np)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < 6)
{
if (tid < 3)
{
int i = tid;
mu_x[i] = 0;
for (int dy = 0; dy < tarrows; dy++)
{
mu_x[i] += target[dy * 3 + i] * pt1[dy] / np;
}
}
else
{
int i = tid - 3;
mu_y[i] = 0;
for (int dy = 0; dy < sourows; dy++)
{
mu_y[i] += source[dy * 3 + i] * p1[dy] / np;
}
}
}
}
__global__ void ComputeB(double *b1, double *b2, double *px, double *p1,double *mu_x, double *mu_y, double *source, double np, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j < 6)
{
if (j < 3)
{
b1[i * 3 + j] = 0;
for (int dy = 0; dy < sourows; dy++)
{
b1[i * 3 + j] += px[dy * 3 + i] * source[dy * 3 + j];
}
b1[i * 3 + j] -= np * mu_x[i] * mu_y[j];
}
else
{
int x = i;
int y = j - 3;
b2[x * 3 + y] = 0;
for (int dy = 0; dy < sourows; dy++)
{
b2[x * 3 + y] += source[dy * 3 + x] * p1[dy] * source[dy * 3 + y];
}
b2[x * 3 + y] -= np * mu_y[x] * mu_y[y];
}
}
}
__global__ void ComputeTrans(double *b1,double *b2, double *inv_b2, double *mu_x, double *mu_y, double *transform, double *translation)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j < 3)
{
double temp[4];
double det = b2[0] * b2[4] * b2[8] + b2[3] * b2[7] * b2[2] + b2[6] * b2[1] * b2[5]
- b2[0] * b2[7] * b2[5] - b2[3] * b2[1] * b2[8] - b2[6] * b2[4] * b2[2];
int tid = 0;
for (int x = 0; x < 3; x++){
if (x != j){
for (int y = 0; y < 3; y++){
if (y != i){
temp[tid] = b2[x * 3 + y];
tid++;
}
}
}
}
inv_b2[i * 3 + j] = pow(-1.0, i + j) * (temp[0] * temp[3] - temp[1] * temp[2]) / det;
__syncthreads();
transform[i * 3 + j] = b1[i * 3] * inv_b2[j] + b1[i * 3 + 1] * inv_b2[3 + j] + b1[i * 3 + 2] * inv_b2[6 + j];
__syncthreads();
if (j == 0)
{
translation[i] = mu_x[i] -
(transform[i * 3] * mu_y[0] + transform[i * 3 + 1] * mu_y[1] + transform[i * 3 + 2] * mu_y[2]);
}
}
}
__global__ void ComputeSigma1(double* target, double* pt1, double* cache, int tarrows)
{
__shared__ double temp[1024];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
temp[tempIndex] = 0;
if (tid < tarrows)
{
temp[tempIndex] = (pow(target[tid * 3], 2) + pow(target[tid * 3 + 1], 2) + pow(target[tid * 3 + 2], 2))* pt1[tid];
}
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
cache[blockIdx.x] = temp[0];
}
}
__global__ void ComputeSigma2(double *cache, double *mu_x, double *mu_y, double *b1, double *transform, double *sigma2, double np, int num)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0)
{
*sigma2 = 0;
for (int i = 0; i < num; i++)
{
*sigma2 += cache[i];
}
for (int i = 0; i < 3; i++)
{
*sigma2 += -np * mu_x[i] * mu_x[i] -
(b1[i * 3] * transform[i] + b1[i * 3 + 1] * transform[3 + i] + b1[i * 3 + 2] * transform[6 + i]);
}
*sigma2 /= np * 3;
}
}
__global__ void ComputeRes(double* source, double* transform, double* translation, double* result, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < sourows && j < 3)
{
result[i * 3 + j] = source[i * 3] * transform[j*3] + source[i * 3 + 1] * transform[j*3 + 1] + source[i * 3 + 2] * transform[j*3 + 2] + translation[j];
}
}
/*__global__ void Denormalize(double *result, double *target, double *transform, double *translation, double *tar_means, double *sou_means, double scale, int sourows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < 3 && j == 0)
{
translation[i] = scale*translation[i] + tar_means[i];
for (int tid = 0; tid < 3; tid++)
{
translation[i] -= transform[i * 3 + tid] * sou_means[tid];
}
}
if (i < sourows && j < 3)
{
result[i * 3 + j] = result[i * 3 + j] * scale + tar_means[j];
}
}*/
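/*
 * Minimal calling sketch (assumes myMatrix, declared in kernel.cuh, exposes
 * rows, cols and a row-major double* Mat, as used throughout Compute below;
 * the numeric arguments are example values):
 *
 *   myMatrix target, source;               // two N x 3 point sets
 *   std::vector<double> result(source.rows * 3);
 *   double R[9], t[3];
 *   cudaError_t st = Compute(&target, &source, result.data(), R, t,
 *                            1.0,     // initial sigma2
 *                            0.1,     // outlier weight
 *                            1e-5,    // tolerance
 *                            150);    // max iterations
 */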
cudaError_t Compute(myMatrix* target, myMatrix* source, double* result, double* transform, double* translation, double sigma2, double outliers, double tolerance, int max_iter)
{
//define the initial value
double ntol = tolerance + 10.0;
double l = 0.0;
int iter = 0;
double res_sigma2 = sigma2;
int tarrows = target->rows;
int sourows = source->rows;
dim3 tarnormblocks((tarrows + 31) / 32, 1);
dim3 tarnormthreads(32, 3);
dim3 sounormblocks((sourows + 31) / 32, 1);
dim3 sounormthreads(32, 3);
dim3 problocks((tarrows + 31) / 32, (sourows + 31) / 32);
dim3 prothreads(32, 32);
dim3 comblocks(1,1);
dim3 comthreads(3,6);
dim3 transblocks(1, 1);
dim3 transthreads(3, 3);
dim3 resblocks((sourows + 31) / 32,1);
dim3 resthreads(32, 3);
int num1 = (tarrows + 1023) / 1024;
int num2 = (sourows + 1023) / 1024;
//CPU memory allocation
double *cache_tar = (double*)malloc(num1* sizeof(double));
double *cache_sou = (double*)malloc(num2* sizeof(double));
//GPU memory allocation
cudaError_t cudaStatus;
double *dev_target, *dev_source, *dev_result;
cudaStatus = cudaMalloc((void**)&dev_target, tarrows*3*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_source, sourows*3*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_result, sourows*3*sizeof(double));
cudaStatus = cudaMemcpy(dev_target, target->Mat, tarrows * 3 * sizeof(double), cudaMemcpyHostToDevice);
cudaStatus = cudaMemcpy(dev_source, source->Mat, sourows * 3 * sizeof(double), cudaMemcpyHostToDevice);
double *dev_tar_means, *dev_sou_means;
cudaStatus = cudaMalloc((void**)&dev_tar_means, 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_sou_means, 3 * sizeof(double));
double *dev_cache_tar, *dev_cache_sou, *dev_sigma2;
cudaStatus = cudaMalloc((void**)&dev_cache_tar, num1* sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_cache_sou, num2* sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_sigma2, sizeof(double));
/*PointsMean << <1, 3 >> >(dev_target, dev_tar_means, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsDis << <tarnormblocks, tarnormthreads >> >(dev_target, dev_tar_means, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsScale << <num1, 1024 >> >(dev_target, dev_cache_tar, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(cache_tar, dev_cache_tar, num1 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
double scale_tar = 0;
for (int i = 0; i < num1; i++)
{
scale_tar += cache_tar[i];
}
PointsMean << <1, 3 >> >(dev_source, dev_sou_means, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsDis << <sounormblocks, sounormthreads >> >(dev_source, dev_sou_means, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsScale << <num2, 1024 >> >(dev_source, dev_cache_sou, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(cache_sou, dev_cache_sou, num2 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
double scale_sou = 0;
for (int i = 0; i < num1; i++)
{
scale_sou += cache_tar[i];
}
double scale = std::max(scale_tar, scale_sou);
PointsNorm << <tarnormblocks, tarnormthreads >> >(dev_target, scale, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
PointsNorm << <sounormblocks, sounormthreads >> >(dev_source, scale, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}*/
cudaStatus = cudaMemcpy(dev_result, source->Mat, sourows * 3 * sizeof(double), cudaMemcpyHostToDevice);
double *dev_pt1, *dev_p1, *dev_px, *dev_l;
double *dev_p, *dev_sp;
cudaStatus = cudaMalloc((void**)&dev_pt1, tarrows*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_p1, sourows*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_px, sourows*3*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_l, tarrows*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_p, tarrows*sourows*sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_sp, tarrows*sizeof(double));
double *dev_mu_x, *dev_mu_y;
double *dev_b1, *dev_b2, *dev_inv_b2;
double *dev_transform, *dev_translation;
cudaStatus = cudaMalloc((void**)&dev_mu_x, 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_mu_y, 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_b1, 3 * 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_b2, 3 * 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_inv_b2, 3 * 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_transform, 3 * 3 * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_translation, 3 * sizeof(double));
	//loop calculation
while (iter < max_iter && ntol > tolerance &&
res_sigma2 > 10 * std::numeric_limits<double>::epsilon())
{
double ksig = -2.0 * res_sigma2;
int cols = target->cols;
double outlier_tmp =
(outliers * sourows * std::pow(-ksig * M_PI, 0.5 * cols)) /
((1 - outliers) * tarrows);
//compute p
Probability1 << <problocks, prothreads>> >(dev_target, dev_result, dev_p, ksig, tarrows, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute sp and pt1
Probability2 << <(tarrows + 31) / 32, 32 >> >(dev_p, dev_sp, dev_pt1, dev_l, outlier_tmp, tarrows, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute px and p1
Probability3 << <(sourows + 31) / 32, 32 >> >(dev_target, dev_p, dev_sp, dev_p1, dev_px, tarrows, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute l
Computel << <num1, 1024 >> >(dev_l, dev_cache_tar, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(cache_tar, dev_cache_tar, num1 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
double pro_l = 0;
for (int i = 0; i < num1; i++)
{
pro_l += cache_tar[i];
}
pro_l += cols * tarrows * std::log(res_sigma2) / 2;
ntol = std::abs((pro_l - l) / pro_l);
l = pro_l;
//compute np
ComputeNp<<<num2,1024>>>(dev_p1, dev_cache_sou, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(cache_sou, dev_cache_sou, num2 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
double np = 0;
for (int i = 0; i < num2; i++)
{
np += cache_sou[i];
}
//compute mu_x and mu_y
ComputeMu<< <1, 6 >> >(dev_target, dev_source, dev_p1, dev_pt1, dev_mu_x, dev_mu_y, tarrows, sourows, np);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute b1 and b2
ComputeB<< <comblocks, comthreads >> >(dev_b1, dev_b2, dev_px, dev_p1, dev_mu_x, dev_mu_y, dev_source, np, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//compute transform and translation
ComputeTrans<< <transblocks, transthreads >> >(dev_b1, dev_b2, dev_inv_b2, dev_mu_x, dev_mu_y, dev_transform, dev_translation);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// compute sigma2
ComputeSigma1<< <num1, 1024 >> >(dev_target, dev_pt1, dev_cache_tar, tarrows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
ComputeSigma2<< <1, 1 >> >(dev_cache_tar, dev_mu_x, dev_mu_y, dev_b1, dev_transform, dev_sigma2, np, num1);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(&res_sigma2, dev_sigma2, sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//compute result points
ComputeRes<< <resblocks, resthreads >> >(dev_source, dev_transform, dev_translation, dev_result, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
++iter;
}
//print the iterations, ntol and sigma2
printf("iter = %d\n", iter);
printf("ntol = %f\n",ntol);
printf("res_sigma2 = %f\n",res_sigma2);
/*Denormalize << <resblocks, resthreads >> >(dev_result, dev_target, dev_transform, dev_translation, dev_tar_means, dev_sou_means, scale, sourows);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}*/
//return transform
cudaStatus = cudaMemcpy(transform, dev_transform, 3 * 3 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//return translation
cudaStatus = cudaMemcpy(translation, dev_translation, 3 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//return last result
cudaStatus = cudaMemcpy(result, dev_result, sourows * 3 * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
printf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//free the memory
Error:
free(cache_tar);
free(cache_sou);
cudaFree(dev_source);
cudaFree(dev_target);
cudaFree(dev_pt1);
cudaFree(dev_p1);
cudaFree(dev_px);
cudaFree(dev_l);
cudaFree(dev_p);
cudaFree(dev_sp);
cudaFree(dev_result);
cudaFree(dev_b1);
cudaFree(dev_b2);
cudaFree(dev_cache_tar);
cudaFree(dev_inv_b2);
cudaFree(dev_mu_x);
cudaFree(dev_mu_y);
cudaFree(dev_transform);
cudaFree(dev_translation);
cudaFree(dev_sigma2);
cudaFree(dev_cache_sou);
cudaFree(dev_tar_means);
cudaFree(dev_sou_means);
return cudaStatus;
} |
268998b66a29ae03db841d3bdcbf8a2d923e3330.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/arange.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void make_sequence(const Size_t size, T *dst, const float start,
const float step) {
NBLA_CUDA_KERNEL_LOOP(i, size) { dst[i] = start + i * step; }
}
template <typename T>
void ArangeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
if (outputs[0]->size() > 0) {
cuda_set_device(this->device_);
auto y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(make_sequence<Tcu>, outputs[0]->size(), y,
this->start_, this->step_);
}
}
} // namespace nbla
| 268998b66a29ae03db841d3bdcbf8a2d923e3330.cu | // Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/arange.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void make_sequence(const Size_t size, T *dst, const float start,
const float step) {
NBLA_CUDA_KERNEL_LOOP(i, size) { dst[i] = start + i * step; }
}
template <typename T>
void ArangeCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
if (outputs[0]->size() > 0) {
cuda_set_device(this->device_);
auto y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(make_sequence<Tcu>, outputs[0]->size(), y,
this->start_, this->step_);
}
}
} // namespace nbla
|
b7416fea757c92de1671f1c7eab8db9b1be2688a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/convolution.h"
#include "layers/cuda/macros.h"
#include "layers/cuda/utils.h"
namespace graphdl
{
namespace core
{
namespace layers
{
namespace cuda
{
namespace
{
// params = [inShape, outShape, kerShape, strides]
__constant__ int shapeParams[14];
#define IN_SHAPE shapeParams
#define OUT_SHAPE (shapeParams + 4)
#define KER_SHAPE (shapeParams + 8)
#define strideX (shapeParams[12])
#define strideY (shapeParams[13])
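// Layout of shapeParams (copied to constant memory before each launch); the
// axis order of each shape follows the chosen data format (NHWC or NCHW):
// [0..3] input shape, [4..7] output shape, [8..11] kernel shape
// (kH, kW, C_in, C_out), [12..13] strides (strideX, strideY).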
template <PaddingType padding>
__global__ void conv2D_nhwc_kernel(const float* in, const float* ker,
float* out)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
float val = 0;
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
val += in[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)] *
ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
}
}
out[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)] = val;
}
}
template <PaddingType padding>
__global__ void conv2D_nchw_kernel(const float* in, const float* ker,
float* out)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
float val = 0;
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
val += in[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)] *
ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
}
}
out[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)] = val;
}
}
template <PaddingType padding>
__global__ void conv2D_grad_x_nhwc_kernel(const float* ker, const float* outG,
float* inG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val * ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
atomicAdd(
&inG[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_k_nhwc_kernel(const float* in, const float* outG,
float* kerG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val *
in[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)];
atomicAdd(&kerG[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_x_nchw_kernel(const float* ker, const float* outG,
float* inG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val * ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
atomicAdd(
&inG[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_k_nchw_kernel(const float* in, const float* outG,
float* kerG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val *
in[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)];
atomicAdd(&kerG[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)],
val);
}
}
}
}
}
} // namespace
void runConv2DDevice(const float* x, const float* k, float* y,
const int* params, PaddingType padding,
DataFormat dataFormat)
{
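// One thread per output element: grid.x/grid.y cover the two output spatial
// dimensions and grid.z covers batch * output channels; the kernels decode z
// back into (n, c_out) and bounds-check against the rounded-up grid.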
const int TILE = 8;
const dim3 BLOCK(TILE, TILE, TILE);
dim3 GRID;
if (dataFormat == DataFormat::kNHWC)
GRID =
dim3((params[5] + TILE - 1) / TILE, (params[6] + TILE - 1) / TILE,
(params[4] * params[7] + TILE - 1) / TILE);
else
GRID =
dim3((params[6] + TILE - 1) / TILE, (params[7] + TILE - 1) / TILE,
(params[4] * params[5] + TILE - 1) / TILE);
hipMemcpyToSymbol(shapeParams, params, 14 * sizeof(int));
if (dataFormat == DataFormat::kNHWC)
{
if (padding == PaddingType::kVALID)
hipLaunchKernelGGL(( conv2D_nhwc_kernel<PaddingType::kVALID>), dim3(GRID), dim3(BLOCK), 0, 0, x, k, y);
else // padding == PaddingType::kSAME
hipLaunchKernelGGL(( conv2D_nhwc_kernel<PaddingType::kSAME>), dim3(GRID), dim3(BLOCK), 0, 0, x, k, y);
}
else // dataFormat == DataFormat::kNCHW
{
if (padding == PaddingType::kVALID)
hipLaunchKernelGGL(( conv2D_nchw_kernel<PaddingType::kVALID>), dim3(GRID), dim3(BLOCK), 0, 0, x, k, y);
else // padding == PaddingType::kSAME
hipLaunchKernelGGL(( conv2D_nchw_kernel<PaddingType::kSAME>), dim3(GRID), dim3(BLOCK), 0, 0, x, k, y);
}
}
void runConv2DGradientDevice(const float* x, const float* k, const float* yG,
float* xG, float* kG, const int* params,
PaddingType padding, DataFormat dataFormat)
{
const int TILE = 8;
const dim3 BLOCK(TILE, TILE, TILE);
dim3 GRID;
if (dataFormat == DataFormat::kNHWC)
GRID =
dim3((params[5] + TILE - 1) / TILE, (params[6] + TILE - 1) / TILE,
(params[4] * params[7] + TILE - 1) / TILE);
else
GRID =
dim3((params[6] + TILE - 1) / TILE, (params[7] + TILE - 1) / TILE,
(params[4] * params[5] + TILE - 1) / TILE);
hipMemcpyToSymbol(shapeParams, params, 14 * sizeof(int));
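// The gradient kernels accumulate into xG and kG with atomicAdd, so both
// buffers must be zeroed before launch (sizes come from the input and kernel
// shapes stored in params).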
size_t size = params[0] * params[1] * params[2] * params[3];
utils::fill(xG, size, 0.);
size = params[8] * params[9] * params[10] * params[11];
utils::fill(kG, size, 0.);
if (dataFormat == DataFormat::kNHWC)
{
if (padding == PaddingType::kVALID)
{
hipLaunchKernelGGL(( conv2D_grad_x_nhwc_kernel<PaddingType::kVALID>)
, dim3(GRID), dim3(BLOCK), 0, 0, k, yG, xG);
hipLaunchKernelGGL(( conv2D_grad_k_nhwc_kernel<PaddingType::kVALID>)
, dim3(GRID), dim3(BLOCK), 0, 0, x, yG, kG);
}
else // padding == PaddingType::kSAME
{
hipLaunchKernelGGL(( conv2D_grad_x_nhwc_kernel<PaddingType::kSAME>)
, dim3(GRID), dim3(BLOCK), 0, 0, k, yG, xG);
hipLaunchKernelGGL(( conv2D_grad_k_nhwc_kernel<PaddingType::kSAME>)
, dim3(GRID), dim3(BLOCK), 0, 0, x, yG, kG);
}
}
else // dataFormat == DataFormat::kNCHW
{
if (padding == PaddingType::kVALID)
{
hipLaunchKernelGGL(( conv2D_grad_x_nchw_kernel<PaddingType::kVALID>)
, dim3(GRID), dim3(BLOCK), 0, 0, k, yG, xG);
hipLaunchKernelGGL(( conv2D_grad_k_nchw_kernel<PaddingType::kVALID>)
, dim3(GRID), dim3(BLOCK), 0, 0, x, yG, kG);
}
else // padding == PaddingType::kSAME
{
hipLaunchKernelGGL(( conv2D_grad_x_nchw_kernel<PaddingType::kSAME>)
, dim3(GRID), dim3(BLOCK), 0, 0, k, yG, xG);
hipLaunchKernelGGL(( conv2D_grad_k_nchw_kernel<PaddingType::kSAME>)
, dim3(GRID), dim3(BLOCK), 0, 0, x, yG, kG);
}
}
}
#undef IN_SHAPE
#undef OUT_SHAPE
#undef KER_SHAPE
#undef strideX
#undef strideY
} // namespace cuda
} // namespace layers
} // namespace core
} // namespace graphdl
| b7416fea757c92de1671f1c7eab8db9b1be2688a.cu | #include "layers/convolution.h"
#include "layers/cuda/macros.h"
#include "layers/cuda/utils.h"
namespace graphdl
{
namespace core
{
namespace layers
{
namespace cuda
{
namespace
{
// params = [inShape, outShape, kerShape, strides]
__constant__ int shapeParams[14];
#define IN_SHAPE shapeParams
#define OUT_SHAPE (shapeParams + 4)
#define KER_SHAPE (shapeParams + 8)
#define strideX (shapeParams[12])
#define strideY (shapeParams[13])
template <PaddingType padding>
__global__ void conv2D_nhwc_kernel(const float* in, const float* ker,
float* out)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
float val = 0;
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
val += in[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)] *
ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
}
}
out[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)] = val;
}
}
template <PaddingType padding>
__global__ void conv2D_nchw_kernel(const float* in, const float* ker,
float* out)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
float val = 0;
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
val += in[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)] *
ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
}
}
out[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)] = val;
}
}
template <PaddingType padding>
__global__ void conv2D_grad_x_nhwc_kernel(const float* ker, const float* outG,
float* inG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val * ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
atomicAdd(
&inG[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_k_nhwc_kernel(const float* in, const float* outG,
float* kerG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[3];
n /= OUT_SHAPE[3];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[1] && y_out < OUT_SHAPE[2])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, x_out, y_out, c_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[1]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[2]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val *
in[POS_4D(n, x_in + dx, y_in + dy, c_in, IN_SHAPE)];
atomicAdd(&kerG[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_x_nchw_kernel(const float* ker, const float* outG,
float* inG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val * ker[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)];
atomicAdd(
&inG[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)],
val);
}
}
}
}
}
template <PaddingType padding>
__global__ void conv2D_grad_k_nchw_kernel(const float* in, const float* outG,
float* kerG)
{
int x_out = blockIdx.x * blockDim.x + threadIdx.x;
int y_out = blockIdx.y * blockDim.y + threadIdx.y;
int n = blockIdx.z * blockDim.z + threadIdx.z;
int c_out = n % OUT_SHAPE[1];
n /= OUT_SHAPE[1];
if (n < OUT_SHAPE[0] && x_out < OUT_SHAPE[2] && y_out < OUT_SHAPE[3])
{
int x_in = x_out * strideX, y_in = y_out * strideY;
if (padding == PaddingType::kSAME)
{
x_in -= (KER_SHAPE[0] - 1) / 2;
y_in -= (KER_SHAPE[1] - 1) / 2;
}
float outG_val = outG[POS_4D(n, c_out, x_out, y_out, OUT_SHAPE)];
for (int dx = x_in < 0 ? -x_in : 0; dx < KER_SHAPE[0]; ++dx)
{
if (x_in + dx >= IN_SHAPE[2]) break;
for (int dy = y_in < 0 ? -y_in : 0; dy < KER_SHAPE[1]; ++dy)
{
if (y_in + dy >= IN_SHAPE[3]) break;
for (int c_in = 0; c_in < KER_SHAPE[2]; ++c_in)
{
float val =
outG_val *
in[POS_4D(n, c_in, x_in + dx, y_in + dy, IN_SHAPE)];
atomicAdd(&kerG[POS_4D(dx, dy, c_in, c_out, KER_SHAPE)],
val);
}
}
}
}
}
} // namespace
void runConv2DDevice(const float* x, const float* k, float* y,
const int* params, PaddingType padding,
DataFormat dataFormat)
{
const int TILE = 8;
const dim3 BLOCK(TILE, TILE, TILE);
dim3 GRID;
if (dataFormat == DataFormat::kNHWC)
GRID =
dim3((params[5] + TILE - 1) / TILE, (params[6] + TILE - 1) / TILE,
(params[4] * params[7] + TILE - 1) / TILE);
else
GRID =
dim3((params[6] + TILE - 1) / TILE, (params[7] + TILE - 1) / TILE,
(params[4] * params[5] + TILE - 1) / TILE);
cudaMemcpyToSymbol(shapeParams, params, 14 * sizeof(int));
if (dataFormat == DataFormat::kNHWC)
{
if (padding == PaddingType::kVALID)
conv2D_nhwc_kernel<PaddingType::kVALID><<<GRID, BLOCK>>>(x, k, y);
else // padding == PaddingType::kSAME
conv2D_nhwc_kernel<PaddingType::kSAME><<<GRID, BLOCK>>>(x, k, y);
}
else // dataFormat == DataFormat::kNCHW
{
if (padding == PaddingType::kVALID)
conv2D_nchw_kernel<PaddingType::kVALID><<<GRID, BLOCK>>>(x, k, y);
else // padding == PaddingType::kSAME
conv2D_nchw_kernel<PaddingType::kSAME><<<GRID, BLOCK>>>(x, k, y);
}
}
void runConv2DGradientDevice(const float* x, const float* k, const float* yG,
float* xG, float* kG, const int* params,
PaddingType padding, DataFormat dataFormat)
{
const int TILE = 8;
const dim3 BLOCK(TILE, TILE, TILE);
dim3 GRID;
if (dataFormat == DataFormat::kNHWC)
GRID =
dim3((params[5] + TILE - 1) / TILE, (params[6] + TILE - 1) / TILE,
(params[4] * params[7] + TILE - 1) / TILE);
else
GRID =
dim3((params[6] + TILE - 1) / TILE, (params[7] + TILE - 1) / TILE,
(params[4] * params[5] + TILE - 1) / TILE);
cudaMemcpyToSymbol(shapeParams, params, 14 * sizeof(int));
size_t size = params[0] * params[1] * params[2] * params[3];
utils::fill(xG, size, 0.);
size = params[8] * params[9] * params[10] * params[11];
utils::fill(kG, size, 0.);
if (dataFormat == DataFormat::kNHWC)
{
if (padding == PaddingType::kVALID)
{
conv2D_grad_x_nhwc_kernel<PaddingType::kVALID>
<<<GRID, BLOCK>>>(k, yG, xG);
conv2D_grad_k_nhwc_kernel<PaddingType::kVALID>
<<<GRID, BLOCK>>>(x, yG, kG);
}
else // padding == PaddingType::kSAME
{
conv2D_grad_x_nhwc_kernel<PaddingType::kSAME>
<<<GRID, BLOCK>>>(k, yG, xG);
conv2D_grad_k_nhwc_kernel<PaddingType::kSAME>
<<<GRID, BLOCK>>>(x, yG, kG);
}
}
else // dataFormat == DataFormat::kNCHW
{
if (padding == PaddingType::kVALID)
{
conv2D_grad_x_nchw_kernel<PaddingType::kVALID>
<<<GRID, BLOCK>>>(k, yG, xG);
conv2D_grad_k_nchw_kernel<PaddingType::kVALID>
<<<GRID, BLOCK>>>(x, yG, kG);
}
else // padding == PaddingType::kSAME
{
conv2D_grad_x_nchw_kernel<PaddingType::kSAME>
<<<GRID, BLOCK>>>(k, yG, xG);
conv2D_grad_k_nchw_kernel<PaddingType::kSAME>
<<<GRID, BLOCK>>>(x, yG, kG);
}
}
}
#undef IN_SHAPE
#undef OUT_SHAPE
#undef KER_SHAPE
#undef strideX
#undef strideY
} // namespace cuda
} // namespace layers
} // namespace core
} // namespace graphdl
|
891c966355ff868ac672679afb15961adc9c09fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
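// Naive SGEMM, one thread per element of C (no bounds check, so the launch
// grid must cover N x M exactly). Note the B access uses K as its row stride;
// this appears to assume M == K (e.g. square matrices), since a row-major
// K x M matrix B would normally be indexed as B[i * M + col].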
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.f;
for (int i = 0; i < K; ++i)
sum += A[row * K + i] * B[i * K + col];
C[row * M + col] = alpha * sum + beta * C[row * M + col];
} | 891c966355ff868ac672679afb15961adc9c09fe.cu | #include "includes.h"
__global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.f;
for (int i = 0; i < K; ++i)
sum += A[row * K + i] * B[i * K + col];
C[row * M + col] = alpha * sum + beta * C[row * M + col];
} |
7e46dd20374b6cc63ec2715c8b7540ce25fc75b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* als.cu
*
* Created on: Feb 10, 2015
* Author: Wei Tan ([email protected])
* Alternating Least Square for Matrix Factorization on CUDA 7.0+
* Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2
*/
//do not use fp16 by default
//#define CUMF_USE_HALF
#define SURPASS_NAN // by ADRIAN HSU
#define USE_CG
//if conjugate gradient solver generates results in FP16
//#define CUMF_TT_FP16
//#define CUMF_XX_FP16
#define CG_ITER 6
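//number of conjugate-gradient iterations per ALS half-sweep when USE_CG is defined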
//#define CUMF_SAVE_MODEL
#include "als.h"
#include "device_utilities.h"
#include "cg.h"
#include "host_utilities.h"
#include <fstream>
#include <assert.h>
#include <hip/hip_fp16.h>
#ifdef CUMF_USE_HALF
#define SCAN_BATCH 24
#else
#define SCAN_BATCH 28
#endif
#include <iostream>
using namespace std;
void saveDeviceFloatArrayToFile(string fileName, int size, float* d_array){
float* h_array;
cudacall(hipHostMalloc( (void** ) &h_array, size * sizeof(h_array[0])) );
cudacall(hipMemcpy(h_array, d_array, size * sizeof(h_array[0]),hipMemcpyDeviceToHost));
FILE * outfile = fopen(fileName.c_str(), "wb");
fwrite(h_array, sizeof(float), size, outfile);
fclose(outfile);
hipHostFree(h_array);
}
int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT,
hipblasHandle_t handle, const int m, const int n, const int f, const int nnz,
float** devPtrTTHost, float **devPtrYthetaTHost){
#ifdef DEBUG
float elapsed;
struct timeval tv0, tv1, tv2;
gettimeofday(&tv0, NULL);
printf("*******Batch LU factorization of tt.\n");
#endif
//pointers needed by batch op
float **devPtrTT = 0;
int *INFO;
for (int k = 0; k < batch_size; k++) {
devPtrTTHost[k] = &tt[k * f * f];
}
cudacall(hipMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT)));
cudacall(hipMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),hipMemcpyHostToDevice));
//cudacall( hipMalloc(&P, f * batch_size * sizeof(int)) );
cudacall( hipMalloc(&INFO, batch_size * sizeof(int) ));
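// Passing NULL as the pivot array requests LU factorization without pivoting;
// each tt block is a symmetric positive-definite Gram matrix plus lambda*|N(u)|*I,
// so non-pivoted LU is expected to be stable here.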
cublascall(hipblasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size));
hipDeviceSynchronize();
#ifdef DEBUG
gettimeofday(&tv1, NULL);
elapsed = (tv1.tv_sec - tv0.tv_sec)
+ (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n");
#endif
float **devPtrYthetaT = 0;
for (int k = 0; k < batch_size; k++) {
devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f];
}
cudacall(hipMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT)));
cudacall(hipMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), hipMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1,
(const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) );
hipDeviceSynchronize();
hipError_t cudaStat1 = hipGetLastError();
if (cudaStat1 != hipSuccess) {
fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1));
exit(EXIT_FAILURE);
}
cudacall( hipMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f],
batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) );
#ifdef DEBUG
gettimeofday(&tv2, NULL);
elapsed = (tv2.tv_sec - tv1.tv_sec)
+ (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
#endif
cudacall(hipFree(devPtrTT));
//cudacall(hipFree(P));
cudacall(hipFree(INFO));
free(info2);
cudacall(hipFree(devPtrYthetaT));
return 0;
}
int updateTheta(const int batch_size, const int batch_offset, float * xx,
float * yTXT, float * thetaT,
hipblasHandle_t handle,
const int m, const int n, const int f, const int nnz,
float ** devPtrXXHost, float **devPtrYTXTHost ){
#ifdef DEBUG
float elapsed;
struct timeval tv0, tv1, tv2;
gettimeofday(&tv0, NULL);
printf("*******LU factorize xx.\n");
#endif
float **devPtrXX = 0;
for (int k = 0; k < batch_size; k++) {
devPtrXXHost[k] = &xx[k * f * f];
}
cudacall(hipMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX)));
cudacall(hipMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), hipMemcpyHostToDevice));
int *INFO;
//cudacall(hipMalloc(&P, f * batch_size * sizeof(int)));
cudacall(hipMalloc(&INFO, batch_size * sizeof(int)));
cublascall(hipblasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size));
hipDeviceSynchronize();
#ifdef DEBUG
gettimeofday(&tv1, NULL);
elapsed = (tv1.tv_sec - tv0.tv_sec)
+ (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
printf("******* solve xx * thetaT = yTXT with CUDA 7.\n");
#endif
float **devPtrYTXT = 0;
for (int k = 0; k < batch_size; k++) {
devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f];
}
cudacall(hipMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT)));
cudacall(hipMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),hipMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1,
(const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) );
hipDeviceSynchronize();
hipError_t cudaStat1 = hipGetLastError();
if (cudaStat1 != hipSuccess) {
fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1));
exit(EXIT_FAILURE);
}
cudacall( hipMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f],
batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) );
#ifdef DEBUG
gettimeofday(&tv2, NULL);
elapsed = (tv2.tv_sec - tv1.tv_sec)
+ (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
#endif
hipFree(devPtrXX);
hipFree(INFO);
free(info2);
hipFree(devPtrYTXT);
return 0;
}
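// RMSE helper: each nonzero's squared prediction error is accumulated (via
// atomicAdd) into one of error_size buckets (i % error_size); the host then
// sums the buckets with a blas Sasum call and reports sqrt(total / nnz).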
__global__ void RMSE(const float * csrVal, const int* cooRowIndex,
const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz,
const int error_size, const int f) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < nnz) {
int row = cooRowIndex[i];
int col = csrColIndex[i];
float e = csrVal[i];
//if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\n", row, col, i, e);
for (int k = 0; k < f; k++) {
#ifdef SURPASS_NAN
//a and b could be NaN: a user/item may appear in the testing set but not in the training set
float a = __ldg(&thetaT[f * col + k]);
float b = __ldg(&XT[f * row + k]);
//if(isnan(a)||isnan(b))//nan not working in some platform
if(a!=a||b!=b)
break;
else
e -= a * b;
//if(isnan(a)) printf("row: %d, col: %d\n", row, col);
//if(isnan(b)) printf("b[%d]: %f.\n", i, b);
#else
e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]);
#endif
}
atomicAdd(&error[i%error_size], e*e);
//if(i%1000000==0) printf("error[%d]: %f.\n", i, e);
}
}
//using fp16 as thetaT's format
//using fp16 in compute seems to cause register pressure since half intrinsics cannot be used.
//using fp16 in compute also does not converge. not sure if the code is incorrect, or ALS cannot tolerate half-precision
__global__ void
__launch_bounds__(64, 6)
get_hermitian100WithHalf(const int batch_offset, float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const half* __restrict__ thetaT_fp16) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
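// Map the 55 active threads onto the distinct tile x tile sub-blocks of the
// symmetric F x F output (10+9+...+1 = 55 tiles with tile_y >= tile_x): thread
// t takes the first row i with t < ((20-i)*(i+1))/2 and the remainder picks the
// column. Each thread accumulates its 10x10 patch of theta*theta^T in registers;
// the mirrored tile is written at the end when tile_x != tile_y.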
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//float2 theta;
//copy texture --> smem, and sync
//two layers: warp divergence unless we split at 32
//require: 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
thetaTemp[index * F/2 + k/2] = __half22float2(theta_half2);
//theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]));
//theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]));
//thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2);
//theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]));
//theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]));
//thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F;
fill_lower_half_from_registers();
//symmetric
if(tile_x!=tile_y){
fill_upper_half_from_registers();
}
}
}
}
__global__ void
__launch_bounds__(64, 6)
get_hermitian100_tt_fp16(const int batch_offset, half2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float2* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
/*
This is the fastest implementation
thetaT is NOT coalesced loaded but cached by L1 and L2
faster than coalesced version (see the next paragraph commented out)
because it concurrently load multiple thetaT columns
two threads per theta column, e.g., threads 0 & 1 for theta[0], threads 2 & 3 for theta[1]
require: blockDim.x (64) >= 2*SCAN_BATCH
*/
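// Example with the default SCAN_BATCH of 28: threads 0..55 take part in the
// copy; the pair (2j, 2j+1) loads theta column csrColIndex[start + iter*SCAN_BATCH + j],
// each thread of the pair writing one half (F/4 float2 values) of that column
// into shared memory.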
///*
if(threadIdx.x < 2*SCAN_BATCH){
int anchor = start + iter*SCAN_BATCH + threadIdx.x/2;
if(anchor < end){
int col = csrColIndex[anchor];
//IMPORTANT: for loop has constant and identical start and end
for (int k = 0; k < 50; k += 2)
//thetaTemp[threadIdx.x*F/4 + k/2] =__ldg(&thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2]);
thetaTemp[threadIdx.x*F/4 + k/2] = thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2];
}
}
//*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55){
if(iter < iterations - 1){
for(int k = 0; k < SCAN_BATCH; k++)
accumulate_in_registers();
}
else{
for(int k = 0; k < end - start - iter*SCAN_BATCH; k++)
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_fp16();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_fp16();
}
}
}
}
__global__ void
__launch_bounds__(64)
get_hermitian100(const int batch_offset, float2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float2* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
/*
This is the fastest implementation
thetaT is NOT coalesced loaded but cached by L1 and L2
faster than coalesced version (see the next paragraph commented out)
because it concurrently load multiple thetaT columns
two threads per theta column, e.g., threads 0 & 1 for theta[0], threads 2 & 3 for theta[1]
require: blockDim.x (64) >= 2*SCAN_BATCH
*/
///*
if(threadIdx.x < 2*SCAN_BATCH){
int anchor = start + iter*SCAN_BATCH + threadIdx.x/2;
if(anchor < end){
int col = csrColIndex[anchor];
//IMPORTANT: for loop has constant and identical start and end
for (int k = 0; k < 50; k += 2)
//thetaTemp[threadIdx.x*F/4 + k/2] =__ldg(&thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2]);
thetaTemp[threadIdx.x*F/4 + k/2] = thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2];
}
}
//*/
/*
//coalesced load thetaT, has to load column by column, less concurrency, worse performance
int anchor = start + iter*SCAN_BATCH + threadIdx.x%32;
int col_local;
if(anchor < end && threadIdx.x%32 < SCAN_BATCH)
col_local = csrColIndex[anchor];
int stop = (end - start - iter*SCAN_BATCH < SCAN_BATCH)? end - start - iter*SCAN_BATCH: SCAN_BATCH;
for (int k = 0; k < stop; k++){
//deal with col_local in lane[k]
int col = __shfl(col_local, k);
//if(blockIdx.x==0 && threadIdx.x==0)
// printf("iter=%d,k=%d,col=%d,stop=%d,anchor=%d\n", iter,k, col, stop, anchor);
//this type of for is bad in performance
//for(int i = threadIdx.x; i < F; i += 64)
if(threadIdx.x<F/2)
thetaTemp[k*F/2 + threadIdx.x] = __ldg(&thetaT[ F/2 * col + threadIdx.x]);
}
*/
__syncthreads();
///*
//tile: 10*10
if(threadIdx.x < 55){
if(iter < iterations - 1){
for(int k = 0; k < SCAN_BATCH; k++)
accumulate_in_registers();
}
else{
for(int k = 0; k < end - start - iter*SCAN_BATCH; k++)
accumulate_in_registers();
}
}
//*/
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_float2();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_float2();
}
}
}
}
/*a generic kernel to get the hermitian matrices
* as the left-hand side of the equations, to update X in ALS
*exemplary F = 100, T = 10
*/
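// For each row (user) u with rated column set N(u), the hermitian kernels build
//   A_u = sum_{v in N(u)} theta_v * theta_v^T + |N(u)| * lambda * I
// (weighted-lambda regularization). A_u is the left-hand side of
//   A_u * x_u = Theta * r_u,
// whose right-hand side is the corresponding column of ythetaT; the system is
// then solved either by batched LU (updateX) or by the CG solver.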
__global__ void
get_hermitianT10(const int batch_offset, float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp [];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int N = F/T10; // N = 100/10=10; for F = 100 and T = 10
int effective_block_size = N*(N+1)/2;
//get the x and y coordinate
int tile_x = 0;
int tile_y = 0;
for ( int i = 0; i < N; i++ ) {
int end = ((2*N-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * T10;
tile_y = (N + threadIdx.x - end) * T10;
break;
}
}
int index = blockIdx.x*F*F;
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//phase 1 in iteration: gmem --> smem
//REQ: blockDim.x >= F/2
if(threadIdx.x < F/2){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
float2 theta;
theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]);
theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]);
thetaTemp[k * F/2 + threadIdx.x] = theta;
//this simpler statement is slower.
//thetaTemp[k * F/2 + threadIdx.x] = __ldg((float2*)&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]);
}
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float));
}
}
__syncthreads();
//phase 2 in iteration: smem --> register
if(threadIdx.x < effective_block_size){//this redundant "if" seems improving kernel performance
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
//phase 3, after iteration: register --> gmem
if(threadIdx.x < effective_block_size){
fill_lower_half_from_registers();
//symmetric
if(tile_x != tile_y){
fill_upper_half_from_registers();
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < T10; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
}
float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr,
const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr,
const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost,
const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr,
const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda,
const int ITERS, const int X_BATCH, const int THETA_BATCH, const int DEVICEID)
{
hipSetDevice(DEVICEID);
printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz);
//device pointers
int * csrRowIndex = 0;
int * csrColIndex = 0;
float * csrVal = 0;
float * thetaT = 0;
float * tt = 0;
float * XT = 0;
float * cscVal =0;
int * cscRowIndex = 0;
int * cscColIndex = 0;
//coo to calculate RMSE
int * cooRowIndex =0;
float * cooVal_test;
int * cooRowIndex_test;
int * cooColIndex_test;
float final_rmse = 0;
printf("*******start allocating memory on GPU...\n");
cudacall(hipMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0])));
cudacall(hipMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0])));
cudacall(hipMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0])));
//dimension: F*N
cudacall(hipMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0])));
//dimension: M*F
cudacall(hipMalloc((void** ) &XT, f * m * sizeof(XT[0])));
printf("*******start copying memory to GPU...\n");
cudacall(hipMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),hipMemcpyHostToDevice));
cudacall(hipMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyHostToDevice));
//CG needs XT
cudacall(hipMemcpy(XT, XTHost, (size_t ) (m * f * sizeof(XT[0])), hipMemcpyHostToDevice));
cudacall(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
//64-bit smem access
//http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
//initialize cublas, cusparse
hipblasHandle_t handle;
cublascall(hipblasCreate(&handle));
hipsparseHandle_t cushandle = 0;
cusparsecall(hipsparseCreate(&cushandle));
hipsparseMatDescr_t descr;
cusparsecall( hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
using namespace std;
#ifdef DEBUG
//variable used to time
double t0 = 0;
double t1 = 0;
#endif
printf("*******start iterations...\n");
for(int iter = 0; iter < ITERS ; iter ++){
#ifdef DEBUG
printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter);
t0 = seconds();
t1 = seconds();
#endif
//copy csr matrix in
cudacall(hipMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0])));
cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
cudacall(hipMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), hipMemcpyHostToDevice));
cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice));
cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice));
#ifdef DEBUG
printf("\tgenerate: Y*theta using cusparse.\n");
#endif
float * ytheta = 0;
float * ythetaT = 0;
cudacall(hipMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0])));
cudacall(hipMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0])));
const float alpha = 1.0f;
const float beta = 0.0f;
cusparsecall (hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal,
csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) );
//hipDeviceSynchronize();
//printf("*******transpose ytheta use cublas.\n");
//ytheta: m*f; need ythetaT = (ytheta).T = f*m
cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, m, &alpha,
(const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f));
//hipDeviceSynchronize();
//cudaCheckError();
cudacall(hipFree(ytheta));
cudacall(hipFree(csrVal));
#ifdef DEBUG
printf("\tgenerate: Y*theta run %f seconds.\n", seconds() - t1);
#endif
int block_dim = f/T10*(f/T10+1)/2;
if (block_dim < f/2) block_dim = f/2;
for(int batch_id = 0; batch_id< X_BATCH; batch_id ++){
#ifdef DEBUG
printf("*******batch %d / %d.*******\n", batch_id, X_BATCH);
#endif
int batch_size = 0;
if(batch_id != X_BATCH - 1)
batch_size = m/X_BATCH;
else
batch_size = m - batch_id*(m/X_BATCH);
int batch_offset = batch_id * (m/X_BATCH);
//use fp16 in tt
#ifdef CUMF_TT_FP16
cudacall(hipMalloc((void** ) &tt, f/2 * f * batch_size * sizeof(float)));
#else
cudacall(hipMalloc((void** ) &tt, f * f * batch_size * sizeof(float)));
#endif
#ifdef DEBUG
t1 = seconds();
printf("\tupdateXByBlock kernel.\n");
#endif
if(f == 100){
//do not use fp16 by default
#ifdef CUMF_USE_HALF
half* thetaT_fp16 = 0;
cudacall(hipMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0])));
hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((n*f-1)/1024 + 1), dim3(1024), 0, 0, thetaT, thetaT_fp16, f*n);
hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16);
cudacall(hipFree(thetaT_fp16));
#elif defined(CUMF_TT_FP16)
hipLaunchKernelGGL(( get_hermitian100_tt_fp16), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, (half2*) tt, csrRowIndex, csrColIndex, lambda, m, f, (float2*)thetaT);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/cg-xx16-tt16.") + std::to_string(iter), f * f * batch_size/2, tt);
#endif
#else
hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, (float2*)tt, csrRowIndex, csrColIndex, lambda, m, f, (float2*)thetaT);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/0904/tt32.") + std::to_string(iter), f * f * batch_size, tt);
#endif
//This commented out is the fused kernel
//performance not good due to register pressure and low occupancy
//alsUpdateFeature100Host
// (batch_offset, csrRowIndex, csrColIndex, lambda, m, f, thetaT, XT, ythetaT, 6);
#endif
}
else
hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT);
hipDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f);
t1 = seconds();
#endif
#ifdef USE_CG //use CG iterative solver
#ifdef CUMF_TT_FP16
//cg_iter = als_iter: solve more carefully in later ALS iterations
printf("\tCG solver with fp16.\n");
updateXWithCGHost_tt_fp16(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, CG_ITER);
#else
printf("\tCG solver with fp32.\n");
updateXWithCGHost(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, CG_ITER);
#endif
#else//use LU solver instead
//host pointers for cublas batch operations
float ** devPtrTTHost = 0;
cudacall(hipHostMalloc( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) );
float **devPtrYthetaTHost = 0;
cudacall(hipHostMalloc( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) );
updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz, devPtrTTHost, devPtrYthetaTHost);
cudacall(hipHostFree(devPtrTTHost));
cudacall(hipHostFree(devPtrYthetaTHost));
#endif
#ifdef DEBUG
printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset);
printf("\tupdateX solver run seconds: %f \n", seconds() - t1);
#endif
cudacall(hipFree(tt));
}
#ifdef DEBUG
printf("update X run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, m, f);
#endif
cudacall(hipFree(csrRowIndex));
cudacall(hipFree(csrColIndex));
cudacall(hipFree(ythetaT));
///*
#ifdef DEBUG
t0 = seconds();
t1 = seconds();
printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter);
printf("\tgenerate: Y'*X using cusparse.\n");
#endif
float * yTX = 0;
float * yTXT = 0;
cudacall(hipMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0])));
cudacall(hipMalloc((void** ) &yTX, n * f * sizeof(yTX[0])));
cusparsecall( hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal,
cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) );
//hipDeviceSynchronize();
//printf("*******transpose yTX \n");
//yTX: n*f; need yTXT = (yTX).T = f*n
cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, n, &alpha,
(const float * ) yTX, n, &beta, yTXT, f, yTXT, f));
hipDeviceSynchronize();
cudacall(hipFree(yTX));
#ifdef DEBUG
printf("\tgenerate: Y'*X run %f seconds.\n", seconds() - t1);
#endif
//in batches, when N is huge
for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){
#ifdef DEBUG
printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH);
#endif
int batch_size = 0;
if(batch_id != THETA_BATCH - 1)
batch_size = n/THETA_BATCH;
else
batch_size = n - batch_id*(n/THETA_BATCH);
int batch_offset = batch_id * (n/THETA_BATCH);
float * xx = 0;
#ifdef CUMF_XX_FP16
cudacall(hipMalloc((void** ) &xx, f/2 * f * batch_size * sizeof(xx[0])));
cudacall( hipMemset(xx, 0, f/2*f*batch_size*sizeof(float)) );
#else
cudacall(hipMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0])));
cudacall( hipMemset(xx, 0, f*f*batch_size*sizeof(float)) );
#endif
#ifdef DEBUG
t1 = seconds();
printf("\tupdateThetaByBlock kernel.\n");
#endif
//get_hermitian_theta<<<batch_size, 64>>>(batch_offset, xx, cscRowIndex, cscColIndex, lambda, n);
//updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>>
if(f == 100){
#ifdef CUMF_USE_HALF
half * XT_fp16 = 0;
cudacall(hipMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0])));
hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((m*f-1)/1024 + 1), dim3(1024), 0, 0, XT, XT_fp16, f*m);
hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16);
cudacall(hipFree(XT_fp16));
#elif defined(CUMF_XX_FP16)
hipLaunchKernelGGL(( get_hermitian100_tt_fp16), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, (half2*) xx, cscColIndex, cscRowIndex, lambda, n, f, (float2*)XT);
#else
hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
batch_offset, (float2*)xx, cscColIndex, cscRowIndex, lambda, n, f, (float2*)XT);
#endif
}
else
hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH*f*sizeof(float), 0,
batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT);
hipDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n",
seconds() - t1, batch_size, f);
t1 = seconds();
#endif
#ifdef DEBUG
printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset);
#endif
#ifdef USE_CG
#ifdef CUMF_XX_FP16
printf("\tCG solver with fp16.\n");
updateXWithCGHost_tt_fp16(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, CG_ITER);
#else
printf("\tCG solver with fp32.\n");
updateXWithCGHost(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, CG_ITER);
#endif
#else
float ** devPtrXXHost = 0;
cudacall(hipHostMalloc( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) );
float **devPtrYTXTHost = 0;
cudacall(hipHostMalloc( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) );
updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, handle, m, n, f, nnz,
devPtrXXHost, devPtrYTXTHost);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/0827/lu-xx32.iter") + std::to_string(iter) + std::string(".batch") + std::to_string(batch_id), f * f * batch_size, xx);
#endif
cudacall(hipHostFree(devPtrXXHost));
cudacall(hipHostFree(devPtrYTXTHost));
#endif
#ifdef DEBUG
printf("\tupdateTheta solver run seconds: %f \n", seconds() - t1);
#endif
cudacall(hipFree(xx));
}
cudacall(hipFree(yTXT));
#ifdef DEBUG
printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n",
seconds() - t0, n, f);
printf("Calculate RMSE.\n");
#endif
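		//Sketch of what the RMSE block below computes: the RMSE kernel accumulates the
		//squared residuals (r_ij - x_i . theta_j)^2 into error_size = 1000 buckets with
		//atomicAdd (bucket i % error_size) to limit atomic contention; cublasSasum then
		//sums the buckets (all entries are non-negative squares, so the absolute-value
		//sum equals the plain sum) and the host reports sqrt(sum / nnz).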
float * errors_train = 0;
int error_size = 1000;
cudacall(hipMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0])));
cudacall( hipMemset(errors_train, 0, error_size*sizeof(float)) );
cudacall(hipMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0])));
cudacall(hipMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), hipMemcpyHostToDevice));
cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice));
cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( RMSE), dim3((nnz-1)/256 + 1), dim3(256), 0, 0,
csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f);
hipDeviceSynchronize();
cudaCheckError();
cudacall(hipFree(cooRowIndex));
cudacall(hipFree(csrColIndex));
cudacall(hipFree(csrVal));
float* rmse_train = (float*) malloc (sizeof(float));
cublascall( hipblasSasum(handle, error_size, errors_train, 1, rmse_train) );
hipDeviceSynchronize();
printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz));
cudacall(hipFree(errors_train));
float * errors_test = 0;
cudacall(hipMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0])));
cudacall( hipMemset(errors_test, 0, error_size*sizeof(float)) );
cudacall(hipMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0])));
cudacall(hipMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), hipMemcpyHostToDevice));
cudacall(hipMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0])));
cudacall(hipMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0])));
cudacall(hipMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),hipMemcpyHostToDevice));
	hipLaunchKernelGGL(( RMSE), dim3((nnz_test-1)/256 + 1), dim3(256), 0, 0, cooVal_test, cooRowIndex_test, cooColIndex_test, thetaT, XT,
errors_test, nnz_test, error_size, f);
hipDeviceSynchronize();
cudaCheckError();
cudacall(hipFree(cooRowIndex_test));
cudacall(hipFree(cooColIndex_test));
cudacall(hipFree(cooVal_test));
float* rmse_test = (float*) malloc (sizeof(float));
cublascall( hipblasSasum(handle, error_size, errors_test, 1, rmse_test) );
hipDeviceSynchronize();
final_rmse = sqrt((*rmse_test)/nnz_test);
printf("--------- Test RMSE in iter %d: %f\n", iter, final_rmse);
cudacall(hipFree(errors_test));
//*/
}
//copy feature vectors back to host
cudacall(hipMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyDeviceToHost));
cudacall(hipMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), hipMemcpyDeviceToHost));
cudacall(hipFree(thetaT));
cudacall(hipFree(XT));
cudacall(hipFree(cscVal));
cudacall(hipFree(cscColIndex));
cudacall(hipFree(cscRowIndex));
//WARN: do not call hipDeviceReset inside ALS()
//because the caller needs to access XT and thetaT which was in the same context
//cudacall(hipDeviceReset());
return final_rmse;
}
| 7e46dd20374b6cc63ec2715c8b7540ce25fc75b1.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* als.cu
*
* Created on: Feb 10, 2015
* Author: Wei Tan ([email protected])
* Alternating Least Square for Matrix Factorization on CUDA 7.0+
* Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2
*/
//do not use fp16 by default
//#define CUMF_USE_HALF
#define SURPASS_NAN // by ADRIAN HSU
#define USE_CG
//if cojugate gradient solver generates results in FP16
//#define CUMF_TT_FP16
//#define CUMF_XX_FP16
#define CG_ITER 6
//#define CUMF_SAVE_MODEL
#include "als.h"
#include "device_utilities.h"
#include "cg.h"
#include "host_utilities.h"
#include <fstream>
#include <assert.h>
#include <cuda_fp16.h>
#ifdef CUMF_USE_HALF
#define SCAN_BATCH 24
#else
#define SCAN_BATCH 28
#endif
#include <iostream>
using namespace std;
void saveDeviceFloatArrayToFile(string fileName, int size, float* d_array){
float* h_array;
cudacall(cudaMallocHost( (void** ) &h_array, size * sizeof(h_array[0])) );
cudacall(cudaMemcpy(h_array, d_array, size * sizeof(h_array[0]),cudaMemcpyDeviceToHost));
FILE * outfile = fopen(fileName.c_str(), "wb");
fwrite(h_array, sizeof(float), size, outfile);
fclose(outfile);
cudaFreeHost(h_array);
}
int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT,
cublasHandle_t handle, const int m, const int n, const int f, const int nnz,
float** devPtrTTHost, float **devPtrYthetaTHost){
#ifdef DEBUG
float elapsed;
struct timeval tv0, tv1, tv2;
gettimeofday(&tv0, NULL);
printf("*******Batch LU factorization of tt.\n");
#endif
//pointers needed by batch op
float **devPtrTT = 0;
int *INFO;
for (int k = 0; k < batch_size; k++) {
devPtrTTHost[k] = &tt[k * f * f];
}
cudacall(cudaMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT)));
cudacall(cudaMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),cudaMemcpyHostToDevice));
//cudacall( cudaMalloc(&P, f * batch_size * sizeof(int)) );
cudacall( cudaMalloc(&INFO, batch_size * sizeof(int) ));
cublascall(cublasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size));
cudaThreadSynchronize();
#ifdef DEBUG
gettimeofday(&tv1, NULL);
elapsed = (tv1.tv_sec - tv0.tv_sec)
+ (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n");
#endif
float **devPtrYthetaT = 0;
for (int k = 0; k < batch_size; k++) {
devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f];
}
cudacall(cudaMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT)));
cudacall(cudaMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), cudaMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1,
(const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) );
cudaThreadSynchronize();
cudaError_t cudaStat1 = cudaGetLastError();
if (cudaStat1 != cudaSuccess) {
fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1));
exit(EXIT_FAILURE);
}
cudacall( cudaMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f],
batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) );
#ifdef DEBUG
gettimeofday(&tv2, NULL);
elapsed = (tv2.tv_sec - tv1.tv_sec)
+ (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
#endif
cudacall(cudaFree(devPtrTT));
//cudacall(cudaFree(P));
cudacall(cudaFree(INFO));
cudacall(cudaFree(devPtrYthetaT));
return 0;
}
int updateTheta(const int batch_size, const int batch_offset, float * xx,
float * yTXT, float * thetaT,
cublasHandle_t handle,
const int m, const int n, const int f, const int nnz,
float ** devPtrXXHost, float **devPtrYTXTHost ){
#ifdef DEBUG
float elapsed;
struct timeval tv0, tv1, tv2;
gettimeofday(&tv0, NULL);
printf("*******LU factorize xx.\n");
#endif
float **devPtrXX = 0;
for (int k = 0; k < batch_size; k++) {
devPtrXXHost[k] = &xx[k * f * f];
}
cudacall(cudaMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX)));
cudacall(cudaMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), cudaMemcpyHostToDevice));
int *INFO;
//cudacall(cudaMalloc(&P, f * batch_size * sizeof(int)));
cudacall(cudaMalloc(&INFO, batch_size * sizeof(int)));
cublascall(cublasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size));
cudaThreadSynchronize();
#ifdef DEBUG
gettimeofday(&tv1, NULL);
elapsed = (tv1.tv_sec - tv0.tv_sec)
+ (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
printf("******* solve xx * thetaT = yTXT with CUDA 7.\n");
#endif
float **devPtrYTXT = 0;
for (int k = 0; k < batch_size; k++) {
devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f];
}
cudacall(cudaMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT)));
cudacall(cudaMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),cudaMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1,
(const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) );
cudaThreadSynchronize();
cudaError_t cudaStat1 = cudaGetLastError();
if (cudaStat1 != cudaSuccess) {
fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1));
exit(EXIT_FAILURE);
}
cudacall( cudaMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f],
batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) );
#ifdef DEBUG
gettimeofday(&tv2, NULL);
elapsed = (tv2.tv_sec - tv1.tv_sec)
+ (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
printf("\t %f seconds. \n", elapsed);
#endif
cudaFree(devPtrXX);
cudaFree(INFO);
free(info2);
cudaFree(devPtrYTXT);
return 0;
}
__global__ void RMSE(const float * csrVal, const int* cooRowIndex,
const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz,
const int error_size, const int f) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < nnz) {
int row = cooRowIndex[i];
int col = csrColIndex[i];
float e = csrVal[i];
//if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\n", row, col, i, e);
for (int k = 0; k < f; k++) {
#ifdef SURPASS_NAN
			//a and b could be NaN: there are users/items in the testing set but not in the training set
float a = __ldg(&thetaT[f * col + k]);
float b = __ldg(&XT[f * row + k]);
//if(isnan(a)||isnan(b))//nan not working in some platform
if(a!=a||b!=b)
break;
else
e -= a * b;
//if(isnan(a)) printf("row: %d, col: %d\n", row, col);
//if(isnan(b)) printf("b[%d]: %f.\n", i, b);
#else
e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]);
#endif
}
atomicAdd(&error[i%error_size], e*e);
//if(i%1000000==0) printf("error[%d]: %f.\n", i, e);
}
}
//using fp16 as thetaT's format
//using fp16 in computate seems causing register pressure since half intrinsics cannot be used.
//using fp16 in compute also does not converge. not sure if the code is incorrect, or ALS cannot tolerate half-precision
__global__ void
__launch_bounds__(64, 6)
get_hermitian100WithHalf(const int batch_offset, float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const half* __restrict__ thetaT_fp16) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//float2 theta;
//copy texture --> smem, and sync
//two layers: warp divergence unless we split at 32
//require: 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
thetaTemp[index * F/2 + k/2] = __half22float2(theta_half2);
//theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]));
//theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]));
//thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2);
//theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]));
//theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]));
//thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F;
fill_lower_half_from_registers();
//symmetric
if(tile_x!=tile_y){
fill_upper_half_from_registers();
}
}
}
}
__global__ void
__launch_bounds__(64, 6)
get_hermitian100_tt_fp16(const int batch_offset, half2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float2* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
/*
This is the fastest implementation
thetaT is NOT coalesced loaded but cached by L1 and L2
faster than coalesced version (see the next paragraph commented out)
			because it concurrently loads multiple thetaT columns
two threads per theta column, e.g., threads 0 & 1 for theta[0], threads 2 & 3 for theta[1]
require: blockDim.x (64) >= 2*SCAN_BATCH
*/
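			/*
			Layout sketch derived from the indexing below (illustration only): for F = 100,
			thread t (t < 2*SCAN_BATCH) handles theta column
			col = csrColIndex[start + iter*SCAN_BATCH + t/2] and copies the (t%2)-th half of
			that column -- float2 elements [(t%2)*F/4, (t%2)*F/4 + F/4) of thetaT -- into
			thetaTemp[t*F/4 .. t*F/4 + F/4). Threads 2s and 2s+1 together fill the contiguous
			slot thetaTemp[s*F/2 .. s*F/2 + F/2) with the s-th column of this SCAN_BATCH window.
			*/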
///*
if(threadIdx.x < 2*SCAN_BATCH){
int anchor = start + iter*SCAN_BATCH + threadIdx.x/2;
if(anchor < end){
int col = csrColIndex[anchor];
//IMPORTANT: for loop has constant and identical start and end
for (int k = 0; k < 50; k += 2)
//thetaTemp[threadIdx.x*F/4 + k/2] =__ldg(&thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2]);
thetaTemp[threadIdx.x*F/4 + k/2] = thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2];
}
}
//*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55){
if(iter < iterations - 1){
for(int k = 0; k < SCAN_BATCH; k++)
accumulate_in_registers();
}
else{
for(int k = 0; k < end - start - iter*SCAN_BATCH; k++)
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_fp16();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_fp16();
}
}
}
}
__global__ void
__launch_bounds__(64)
get_hermitian100(const int batch_offset, float2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float2* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
/*
This is the fastest implementation
thetaT is NOT coalesced loaded but cached by L1 and L2
faster than coalesced version (see the next paragraph commented out)
because it concurrently load multiple thetaT columns
two threads per theta column, e.g., threads 0 & 1 for theta[0], threads 2 & 3 for theta[1]
require: blockDim.x (64) >= 2*SCAN_BATCH
*/
///*
if(threadIdx.x < 2*SCAN_BATCH){
int anchor = start + iter*SCAN_BATCH + threadIdx.x/2;
if(anchor < end){
int col = csrColIndex[anchor];
//IMPORTANT: for loop has constant and identical start and end
for (int k = 0; k < 50; k += 2)
//thetaTemp[threadIdx.x*F/4 + k/2] =__ldg(&thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2]);
thetaTemp[threadIdx.x*F/4 + k/2] = thetaT[ F/2 * col + threadIdx.x%2*F/4 + k/2];
}
}
//*/
/*
//coalesced load thetaT, has to load column by column, less concurrency, worse performance
int anchor = start + iter*SCAN_BATCH + threadIdx.x%32;
int col_local;
if(anchor < end && threadIdx.x%32 < SCAN_BATCH)
col_local = csrColIndex[anchor];
int stop = (end - start - iter*SCAN_BATCH < SCAN_BATCH)? end - start - iter*SCAN_BATCH: SCAN_BATCH;
for (int k = 0; k < stop; k++){
//deal with col_local in lane[k]
int col = __shfl(col_local, k);
//if(blockIdx.x==0 && threadIdx.x==0)
// printf("iter=%d,k=%d,col=%d,stop=%d,anchor=%d\n", iter,k, col, stop, anchor);
//this type of for is bad in performance
//for(int i = threadIdx.x; i < F; i += 64)
if(threadIdx.x<F/2)
thetaTemp[k*F/2 + threadIdx.x] = __ldg(&thetaT[ F/2 * col + threadIdx.x]);
}
*/
__syncthreads();
///*
//tile: 10*10
if(threadIdx.x < 55){
if(iter < iterations - 1){
for(int k = 0; k < SCAN_BATCH; k++)
accumulate_in_registers();
}
else{
for(int k = 0; k < end - start - iter*SCAN_BATCH; k++)
accumulate_in_registers();
}
}
//*/
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_float2();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_float2();
}
}
}
}
/*a generic kernel to get the hermitian matrices
* as the left-hand side of the equations, to update X in ALS
 *exemplary F = 100, T = 10
*/
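/*
 Informal sketch of the math assembled by this kernel: for a row u whose ratings occupy
 [start, end) in the sparse matrix, the hermitian block written to tt is
     tt_u = sum_{j in Omega_u} theta_j * theta_j^T + lambda * |Omega_u| * I,
 i.e. the left-hand side of the weighted-lambda-regularized normal equation
     tt_u * x_u = Theta^T * r_u,
 whose right-hand side (ythetaT / yTXT) is formed in doALS() with cusparseScsrmm2.
 The solve itself then happens either in the CG routines (USE_CG) or in the batched
 LU path of updateX() / updateTheta().
*/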
__global__ void
get_hermitianT10(const int batch_offset, float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F,
const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp [];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int N = F/T10; // N = 100/10=10; for F = 100 and T = 10
int effective_block_size = N*(N+1)/2;
//get the x and y coordinate
int tile_x = 0;
int tile_y = 0;
for ( int i = 0; i < N; i++ ) {
int end = ((2*N-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * T10;
tile_y = (N + threadIdx.x - end) * T10;
break;
}
}
int index = blockIdx.x*F*F;
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
//phase 1 in iteration: gmem --> smem
//REQ: blockDim.x >= F/2
if(threadIdx.x < F/2){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
float2 theta;
theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]);
theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]);
thetaTemp[k * F/2 + threadIdx.x] = theta;
//this simpler statement is slower.
//thetaTemp[k * F/2 + threadIdx.x] = __ldg((float2*)&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]);
}
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float));
}
}
__syncthreads();
//phase 2 in iteration: smem --> register
if(threadIdx.x < effective_block_size){//this redundant "if" seems improving kernel performance
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
//phase 3, after iteration: register --> gmem
if(threadIdx.x < effective_block_size){
fill_lower_half_from_registers();
//symmetric
if(tile_x != tile_y){
fill_upper_half_from_registers();
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < T10; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
}
float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr,
const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr,
const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost,
const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr,
const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda,
const int ITERS, const int X_BATCH, const int THETA_BATCH, const int DEVICEID)
{
cudaSetDevice(DEVICEID);
printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz);
//device pointers
int * csrRowIndex = 0;
int * csrColIndex = 0;
float * csrVal = 0;
float * thetaT = 0;
float * tt = 0;
float * XT = 0;
float * cscVal =0;
int * cscRowIndex = 0;
int * cscColIndex = 0;
//coo to calculate RMSE
int * cooRowIndex =0;
float * cooVal_test;
int * cooRowIndex_test;
int * cooColIndex_test;
float final_rmse = 0;
printf("*******start allocating memory on GPU...\n");
cudacall(cudaMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0])));
cudacall(cudaMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0])));
cudacall(cudaMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0])));
//dimension: F*N
cudacall(cudaMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0])));
//dimension: M*F
cudacall(cudaMalloc((void** ) &XT, f * m * sizeof(XT[0])));
printf("*******start copying memory to GPU...\n");
cudacall(cudaMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyHostToDevice));
//CG needs XT
cudacall(cudaMemcpy(XT, XTHost, (size_t ) (m * f * sizeof(XT[0])), cudaMemcpyHostToDevice));
cudacall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
//64-bit smem access
//http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
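	//Informal rationale: the hermitian kernels in this file stage thetaT in shared memory
	//as float2 (8-byte) elements, and the eight-byte bank mode is meant to let those
	//64-bit shared-memory accesses run at full bandwidth on Kepler-class GPUs (see the
	//link above).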
//initialize cublas, cusparse
cublasHandle_t handle;
cublascall(cublasCreate(&handle));
cusparseHandle_t cushandle = 0;
cusparsecall(cusparseCreate(&cushandle));
cusparseMatDescr_t descr;
cusparsecall( cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
using namespace std;
#ifdef DEBUG
//variable used to time
double t0 = 0;
double t1 = 0;
#endif
printf("*******start iterations...\n");
for(int iter = 0; iter < ITERS ; iter ++){
#ifdef DEBUG
printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter);
t0 = seconds();
t1 = seconds();
#endif
//copy csr matrix in
cudacall(cudaMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0])));
cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
cudacall(cudaMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice));
#ifdef DEBUG
printf("\tgenerate: Y*theta using cusparse.\n");
#endif
float * ytheta = 0;
float * ythetaT = 0;
cudacall(cudaMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0])));
cudacall(cudaMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0])));
const float alpha = 1.0f;
const float beta = 0.0f;
cusparsecall (cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal,
csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) );
//cudaDeviceSynchronize();
//printf("*******transpose ytheta use cublas.\n");
//ytheta: m*f; need ythetaT = (ytheta).T = f*m
cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, m, &alpha,
(const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f));
//cudaDeviceSynchronize();
//cudaCheckError();
cudacall(cudaFree(ytheta));
cudacall(cudaFree(csrVal));
#ifdef DEBUG
printf("\tgenerate: Y*theta run %f seconds.\n", seconds() - t1);
#endif
int block_dim = f/T10*(f/T10+1)/2;
if (block_dim < f/2) block_dim = f/2;
for(int batch_id = 0; batch_id< X_BATCH; batch_id ++){
#ifdef DEBUG
printf("*******batch %d / %d.*******\n", batch_id, X_BATCH);
#endif
int batch_size = 0;
if(batch_id != X_BATCH - 1)
batch_size = m/X_BATCH;
else
batch_size = m - batch_id*(m/X_BATCH);
int batch_offset = batch_id * (m/X_BATCH);
//use fp16 in tt
#ifdef CUMF_TT_FP16
cudacall(cudaMalloc((void** ) &tt, f/2 * f * batch_size * sizeof(float)));
#else
cudacall(cudaMalloc((void** ) &tt, f * f * batch_size * sizeof(float)));
#endif
#ifdef DEBUG
t1 = seconds();
printf("\tupdateXByBlock kernel.\n");
#endif
if(f == 100){
//do not use fp16 by default
#ifdef CUMF_USE_HALF
half* thetaT_fp16 = 0;
cudacall(cudaMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0])));
fp32Array2fp16Array<<<(n*f-1)/1024 + 1, 1024>>>(thetaT, thetaT_fp16, f*n);
get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16);
cudacall(cudaFree(thetaT_fp16));
#elif defined(CUMF_TT_FP16)
get_hermitian100_tt_fp16<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, (half2*) tt, csrRowIndex, csrColIndex, lambda, m, f, (float2*)thetaT);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/cg-xx16-tt16.") + std::to_string(iter), f * f * batch_size/2, tt);
#endif
#else
get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, (float2*)tt, csrRowIndex, csrColIndex, lambda, m, f, (float2*)thetaT);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/0904/tt32.") + std::to_string(iter), f * f * batch_size, tt);
#endif
//This commented out is the fused kernel
//performance not good due to register pressure and low occupancy
//alsUpdateFeature100Host
// (batch_offset, csrRowIndex, csrColIndex, lambda, m, f, thetaT, XT, ythetaT, 6);
#endif
}
else
get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT);
cudaDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f);
t1 = seconds();
#endif
#ifdef USE_CG //use CG iterative solver
#ifdef CUMF_TT_FP16
//cg_iter = als_iter: solve more carefully in later ALS iterations
printf("\tCG solver with fp16.\n");
updateXWithCGHost_tt_fp16(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, CG_ITER);
#else
printf("\tCG solver with fp32.\n");
updateXWithCGHost(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, CG_ITER);
#endif
#else//use LU solver instead
//host pointers for cublas batch operations
float ** devPtrTTHost = 0;
cudacall(cudaMallocHost( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) );
float **devPtrYthetaTHost = 0;
cudacall(cudaMallocHost( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) );
updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz, devPtrTTHost, devPtrYthetaTHost);
cudacall(cudaFreeHost(devPtrTTHost));
cudacall(cudaFreeHost(devPtrYthetaTHost));
#endif
#ifdef DEBUG
printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset);
printf("\tupdateX solver run seconds: %f \n", seconds() - t1);
#endif
cudacall(cudaFree(tt));
}
#ifdef DEBUG
printf("update X run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, m, f);
#endif
cudacall(cudaFree(csrRowIndex));
cudacall(cudaFree(csrColIndex));
cudacall(cudaFree(ythetaT));
///*
#ifdef DEBUG
t0 = seconds();
t1 = seconds();
printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter);
printf("\tgenerate: Y'*X using cusparse.\n");
#endif
float * yTX = 0;
float * yTXT = 0;
cudacall(cudaMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0])));
cudacall(cudaMalloc((void** ) &yTX, n * f * sizeof(yTX[0])));
cusparsecall( cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal,
cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) );
//cudaDeviceSynchronize();
//printf("*******transpose yTX \n");
//yTX: n*f; need yTXT = (yTX).T = f*n
cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, n, &alpha,
(const float * ) yTX, n, &beta, yTXT, f, yTXT, f));
cudaDeviceSynchronize();
cudacall(cudaFree(yTX));
#ifdef DEBUG
printf("\tgenerate: Y'*X run %f seconds.\n", seconds() - t1);
#endif
//in batches, when N is huge
for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){
#ifdef DEBUG
printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH);
#endif
int batch_size = 0;
if(batch_id != THETA_BATCH - 1)
batch_size = n/THETA_BATCH;
else
batch_size = n - batch_id*(n/THETA_BATCH);
int batch_offset = batch_id * (n/THETA_BATCH);
float * xx = 0;
#ifdef CUMF_XX_FP16
cudacall(cudaMalloc((void** ) &xx, f/2 * f * batch_size * sizeof(xx[0])));
cudacall( cudaMemset(xx, 0, f/2*f*batch_size*sizeof(float)) );
#else
cudacall(cudaMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0])));
cudacall( cudaMemset(xx, 0, f*f*batch_size*sizeof(float)) );
#endif
#ifdef DEBUG
t1 = seconds();
printf("\tupdateThetaByBlock kernel.\n");
#endif
//get_hermitian_theta<<<batch_size, 64>>>(batch_offset, xx, cscRowIndex, cscColIndex, lambda, n);
//updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>>
if(f == 100){
#ifdef CUMF_USE_HALF
half * XT_fp16 = 0;
cudacall(cudaMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0])));
fp32Array2fp16Array<<<(n*f-1)/1024 + 1, 1024>>>(XT, XT_fp16, f*m);
get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16);
cudacall(cudaFree(XT_fp16));
#elif defined(CUMF_XX_FP16)
get_hermitian100_tt_fp16<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, (half2*) xx, cscColIndex, cscRowIndex, lambda, n, f, (float2*)XT);
#else
get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(batch_offset, (float2*)xx, cscColIndex, cscRowIndex, lambda, n, f, (float2*)XT);
#endif
}
else
get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH*f*sizeof(float)>>>
(batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT);
cudaDeviceSynchronize();
cudaCheckError();
#ifdef DEBUG
printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n",
seconds() - t1, batch_size, f);
t1 = seconds();
#endif
#ifdef DEBUG
printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset);
#endif
#ifdef USE_CG
#ifdef CUMF_XX_FP16
printf("\tCG solver with fp16.\n");
updateXWithCGHost_tt_fp16(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, CG_ITER);
#else
printf("\tCG solver with fp32.\n");
updateXWithCGHost(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, CG_ITER);
#endif
#else
float ** devPtrXXHost = 0;
cudacall(cudaMallocHost( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) );
float **devPtrYTXTHost = 0;
cudacall(cudaMallocHost( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) );
updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, handle, m, n, f, nnz,
devPtrXXHost, devPtrYTXTHost);
#ifdef CUMF_SAVE_MODEL
saveDeviceFloatArrayToFile(std::string("./log/0827/lu-xx32.iter") + std::to_string(iter) + std::string(".batch") + std::to_string(batch_id), f * f * batch_size, xx);
#endif
cudacall(cudaFreeHost(devPtrXXHost));
cudacall(cudaFreeHost(devPtrYTXTHost));
#endif
#ifdef DEBUG
printf("\tupdateTheta solver run seconds: %f \n", seconds() - t1);
#endif
cudacall(cudaFree(xx));
}
cudacall(cudaFree(yTXT));
#ifdef DEBUG
printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n",
seconds() - t0, n, f);
printf("Calculate RMSE.\n");
#endif
float * errors_train = 0;
int error_size = 1000;
cudacall(cudaMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0])));
cudacall( cudaMemset(errors_train, 0, error_size*sizeof(float)) );
cudacall(cudaMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0])));
cudacall(cudaMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), cudaMemcpyHostToDevice));
cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice));
RMSE<<<(nnz-1)/256 + 1, 256>>>
(csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f);
cudaDeviceSynchronize();
cudaCheckError();
cudacall(cudaFree(cooRowIndex));
cudacall(cudaFree(csrColIndex));
cudacall(cudaFree(csrVal));
float* rmse_train = (float*) malloc (sizeof(float));
cublascall( cublasSasum(handle, error_size, errors_train, 1, rmse_train) );
cudaDeviceSynchronize();
printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz));
cudacall(cudaFree(errors_train));
float * errors_test = 0;
cudacall(cudaMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0])));
cudacall( cudaMemset(errors_test, 0, error_size*sizeof(float)) );
cudacall(cudaMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0])));
cudacall(cudaMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), cudaMemcpyHostToDevice));
cudacall(cudaMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0])));
cudacall(cudaMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0])));
cudacall(cudaMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),cudaMemcpyHostToDevice));
	RMSE<<<(nnz_test-1)/256 + 1, 256>>>(cooVal_test, cooRowIndex_test, cooColIndex_test, thetaT, XT,
errors_test, nnz_test, error_size, f);
cudaDeviceSynchronize();
cudaCheckError();
cudacall(cudaFree(cooRowIndex_test));
cudacall(cudaFree(cooColIndex_test));
cudacall(cudaFree(cooVal_test));
float* rmse_test = (float*) malloc (sizeof(float));
cublascall( cublasSasum(handle, error_size, errors_test, 1, rmse_test) );
cudaDeviceSynchronize();
final_rmse = sqrt((*rmse_test)/nnz_test);
printf("--------- Test RMSE in iter %d: %f\n", iter, final_rmse);
cudacall(cudaFree(errors_test));
//*/
}
//copy feature vectors back to host
cudacall(cudaMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyDeviceToHost));
cudacall(cudaMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), cudaMemcpyDeviceToHost));
cudacall(cudaFree(thetaT));
cudacall(cudaFree(XT));
cudacall(cudaFree(cscVal));
cudacall(cudaFree(cscColIndex));
cudacall(cudaFree(cscRowIndex));
//WARN: do not call cudaDeviceReset inside ALS()
//because the caller needs to access XT and thetaT which was in the same context
//cudacall(cudaDeviceReset());
return final_rmse;
}
|
89a963bccbb2c2b21e56f3d388895f6c72ab656e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <GL/glut.h>
#include <GL/gl.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define DIM 512
// Image data
unsigned char *pixels = NULL;
unsigned char *dpixels; // Device variable
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
// CUDA STUFF
if(dpixels) hipFree(dpixels);
hipMalloc((void**)&dpixels, 4*width*height*sizeof(unsigned char));
gImageWidth = width;
gImageHeight = height;
}
// Select precision here! float or double!
#define MYFLOAT float
// User controlled parameters
int maxiter = 2000;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
// Complex number class
struct hipComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
hipComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot( int x, int y, int gImageWidth, int gImageHeight, float scale, float offsetx, float offsety, int maxiter)
{
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
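// The device function above is the classic escape-time test: starting from a = c (the
// pixel mapped into the complex plane), it iterates a <- a*a + c and returns the first
// iteration at which |a|^2 exceeds 1000, or maxiter if the point never escapes.
// computeFractal below turns that count into the pixel's RGB value.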
__global__
void computeFractal( unsigned char *ptr, int gImageWidth, int gImageHeight, float scale, float offsetx, float offsety, int maxiter)
{
// map from x, y to pixel position
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * gImageWidth;
if( x >= gImageWidth || y >= gImageHeight)
{
return;
}
// now calculate the value at that position
int fractalValue = mandelbrot( x, y, gImageWidth, gImageHeight, scale, offsetx, offsety, maxiter);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
// CUDA STUFF
float elapsedTime = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int numberOfThreads = 32;
	int blocksize = max((gImageWidth + numberOfThreads - 1) / numberOfThreads, 1);
printf("Number of threads: %i x %i\nBlock size: %i x %i\nData dimensions: %i x %i\n", numberOfThreads, numberOfThreads, blocksize, blocksize, gImageWidth, gImageHeight);
dim3 blockDim(numberOfThreads, numberOfThreads);
dim3 gridDim(blocksize, blocksize);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( computeFractal), dim3(gridDim), dim3(blockDim), 0, 0, dpixels, gImageWidth, gImageHeight, scale ,offsetx, offsety, maxiter);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipMemcpy(pixels, dpixels, 4*gImageWidth*gImageHeight*sizeof(unsigned char), hipMemcpyDeviceToHost);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\n*----------- BENCHMARKING -----------*\n");
printf("\n\nCalculations on the GPU with block dimension = %ix%i and grid dimension = %i x %i ran in %f miliseconds \n\n", numberOfThreads, numberOfThreads, blocksize, blocksize, elapsedTime );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
| 89a963bccbb2c2b21e56f3d388895f6c72ab656e.cu | #include <GL/glut.h>
#include <GL/gl.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define DIM 512
// Image data
unsigned char *pixels = NULL;
unsigned char *dpixels; // Device variable
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
// CUDA STUFF
if(dpixels) cudaFree(dpixels);
cudaMalloc((void**)&dpixels, 4*width*height*sizeof(unsigned char));
gImageWidth = width;
gImageHeight = height;
}
// Select precision here! float or double!
#define MYFLOAT float
// User controlled parameters
int maxiter = 2000;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
// Complex number class
struct cuComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
cuComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot( int x, int y, int gImageWidth, int gImageHeight, float scale, float offsetx, float offsety, int maxiter)
{
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;
}
__global__
void computeFractal( unsigned char *ptr, int gImageWidth, int gImageHeight, float scale, float offsetx, float offsety, int maxiter)
{
// map from x, y to pixel position
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * gImageWidth;
if( x >= gImageWidth || y >= gImageHeight)
{
return;
}
// now calculate the value at that position
int fractalValue = mandelbrot( x, y, gImageWidth, gImageHeight, scale, offsetx, offsety, maxiter);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
// CUDA STUFF
float elapsedTime = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int numberOfThreads = 32;
	int blocksize = max((gImageWidth + numberOfThreads - 1) / numberOfThreads, 1);
printf("Number of threads: %i x %i\nBlock size: %i x %i\nData dimensions: %i x %i\n", numberOfThreads, numberOfThreads, blocksize, blocksize, gImageWidth, gImageHeight);
dim3 blockDim(numberOfThreads, numberOfThreads);
dim3 gridDim(blocksize, blocksize);
cudaEventRecord(start, 0);
computeFractal<<<gridDim, blockDim>>>(dpixels, gImageWidth, gImageHeight, scale ,offsetx, offsety, maxiter);
cudaEventRecord(stop, 0);
cudaDeviceSynchronize();
cudaThreadSynchronize();
cudaMemcpy(pixels, dpixels, 4*gImageWidth*gImageHeight*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n*----------- BENCHMARKING -----------*\n");
printf("\n\nCalculations on the GPU with block dimension = %ix%i and grid dimension = %i x %i ran in %f miliseconds \n\n", numberOfThreads, numberOfThreads, blocksize, blocksize, elapsedTime );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (GPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
|
4915883b6e2607cbe6937b9c63fb3154a278645b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MAINKERNEL_H_
#define _MAINKERNEL_H_
#include "DTUpdateKernel.cuh"
#include "WTUpdateKernel.cuh"
#include "WTDenUpdateKernel.cuh"
#include "WTAddKernel.cuh"
#include "SamplingKernel.cuh"
#include "WTDense.cuh"
using namespace std;
void fileErrorCheck(ifstream& argFileStream, string folderName)
{
if (!argFileStream.is_open())
{
cout << "File " << folderName << " open failed" << endl;
exit(0);
}
}
int main(int argc, char *argv[]) {
clock_t startTime, startTime1,endTime;
double transferTimeCPU2GPU=0.0;
double transferTimeGPU2CPU=0.0;
double WTTime=0.0;
double samplingTimeD=0.0;
double samplingTimeS=0.0;
double DTTime=0.0;
double totalTime=0.0;
int maxTLLength;
int maxDTLength;
int maxWTLength;
int maxDocLength;
int wordLength;
int maxChunkWTLength;
int numOfWordD;
int numOfWordS;
int numChunks = 4;
int numIters = 200;
string chunkFilePrefix ="/gpfs/alpine/proj-shared/csc289/lda/datasets/nytimes";
ifstream lengthVec((chunkFilePrefix + string("/lengthVec.txt")).c_str(), ios::binary);//store max Doc and DT length
ofstream timeRecord((chunkFilePrefix + string("/timeRecord.txt")).c_str(), ios::binary);
ofstream SamplingDRecord((chunkFilePrefix + string("/SamplingDRecord.txt")).c_str(), ios::binary);
fileErrorCheck(lengthVec, "/lengthVec.txt");
lengthVec >> maxTLLength >> maxDTLength >> maxWTLength >> maxDocLength >> wordLength>>maxChunkWTLength>> numOfWordD>> numOfWordS;
lengthVec.close();
Document document(chunkFilePrefix,numChunks,maxTLLength,maxDocLength,wordLength);
document.loadDocument();
document.GPUMemAllocate();
DTChunk chunkDT(maxDTLength,maxDocLength,numChunks);
chunkDT.loadDocDTLength(chunkFilePrefix);
chunkDT.CPUMemSet();
chunkDT.GPUMemAllocate();
chunkDT.loadDTCountOffset(chunkFilePrefix);
WTD WTDen(numOfWordD, wordLength);
WTDen.GPUMemAllocate();
WTDen.GPUMemInit();
WTAll WT(maxWTLength, wordLength, numChunks, maxChunkWTLength,numOfWordS);
WT.CPUMemSet();
WT.GPUMemAllocate();
WT.GPUMemset();
WT.loadWTLength(chunkFilePrefix);
WT.loadWTCountOffset(chunkFilePrefix);
WT.blockWarpCountCPU2GPU();
WT.CPU2GPUCountOffset();
srand(time(NULL));
//hiprandState_t* randState[2];
//srand(time(NULL));
//for (int i = 0; i < 2; i++) {
// hipSetDevice(i);
// hipMalloc(&randState[i], sizeof(hiprandState_t)*GridDim*BlockDim);//may have bugs
//}
//H_ERR(hipDeviceSynchronize());
hiprandState_t* randState;
hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim);
H_ERR(hipDeviceSynchronize());
printf("Total memory usage : %f GB\n", document.TLMemory + WT.WTMemory + chunkDT.DTMemory);
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
document.CPU2GPU(chunkId);
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
//--------------update WTDen matrix ---------
UpdateWTDenKernel(WTDen, WT, document, chunkId);
//--------------update WTDen matrix-----------
//--------------update WT matrix--------
//WT.chunkCPU2GPUCountOffset(chunkId);
//WT.chunkGPUMemset();
UpdateWTKernel(WT, document,chunkId);
WT.chunkWTGPU2CPU(chunkId);// marker
//
//WT.CPU2DiskChunk(chunkFilePrefix, chunkId);// marker
/*printf("\n what's this %d\n", chunkId);*/
//--------------update WT matrix-----------
}
/*WTDen.WTDenGPU2CPU();
WTDen.WTDenCPU2Disk(chunkFilePrefix);*/
printf("WT ended!\n");
//WT.CPU2GPUCountOffset();
startTime = clock();
for (int iter = 0; iter < numIters; iter++) {
startTime1=clock();
//printf("chunk WT updated!\n");
WT.GPUMemset();
//--------------update WTDenSum -----------
UpdateWTDenRowSumKernel(WTDen,WT);
//--------------update WTDenSum -----------
//--------------update WTSum -----------
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
WT.chunkWTCPU2GPU(chunkId);
WTAdditionKernel(WT, document);
}
//--------------update WTSum -----------
//WT.WTGPU2CPU();// marker
//WT.CPU2Disk(chunkFilePrefix);// marker
printf("WT updated!\n");
endTime = clock();
WTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
document.CPU2GPUPerplexity();
samplingTimeD=0;
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
// startTime1=clock();
printf("step: %d\n",chunkId);
//--------------update DT matrix-----------
startTime1=clock();
document.CPU2GPU(chunkId);
endTime = clock();
transferTimeCPU2GPU+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
printf("%d\n", 1);
chunkDT.GPUMemSet(chunkId);
printf("%d\n", 2);
chunkDT.CPU2GPUDTCountOffset(chunkId);
printf("%d\n", 3);
//chunkDT.CPU2GPU(chunkId, document.docLengthVec[chunkId]);
startTime1=clock();
UpdateDTKernel(chunkDT, document);
endTime = clock();
printf("%d\n", 4);
//chunkDT.GPU2CPU(chunkId);
//chunkDT.CPU2Disk(chunkFilePrefix, chunkId);// marker
//--------------update DT matrix-----------
// endTime = clock();
printf("%d\n", 5);
DTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
//--------------sampling-----------
printf("%d\n", 6);
SampleKernelD(WTDen, WT, chunkDT, document, randState);
endTime = clock();
samplingTimeD+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
printf("%d\n", 7);
//WTDen.WTDenGPU2CPU();// marker
//WTDen.WTDenCPU2Disk(chunkFilePrefix);// marker
startTime1=clock();
SampleKernel(WT, chunkDT, document, randState);
printf("%d\n", 8);
endTime = clock();
//WT.WTGPU2CPU();// marker
//WT.CPU2Disk(chunkFilePrefix);// marker
//--------------sampling-----------
// endTime = clock();
samplingTimeS+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
document.GPU2CPU(chunkId);
endTime = clock();
transferTimeGPU2CPU+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
//--------------update chunkWT matrix-----------
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
UpdateWTKernel(WT, document, chunkId);
WT.chunkWTGPU2CPU(chunkId);
//WT.CPU2DiskChunk(chunkFilePrefix, chunkId);
//--------------update chunkWT matrix-----------
endTime = clock();
WTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
}
WTDen.GPUMemCopy();
WTDen.GPUMemset();
PerplexityKernel(document);
printf("done!!!!!");
/*document.GPU2CPUPerplexity();*/
// document.CPU2DiskPerplexity(chunkFilePrefix);
endTime = clock();
totalTime=(double)(endTime-startTime)/CLOCKS_PER_SEC;
timeRecord << WTTime << " " << DTTime << " " << samplingTimeD << " " << samplingTimeS << " " << transferTimeCPU2GPU << " " <<transferTimeGPU2CPU << " " <<totalTime << " " << document.sumPerplexity<< "\n";
SamplingDRecord << samplingTimeD << "\n";
printf("WTTime: %f, DTTime: %f, samplingTimeD:%f, samplingTimeS:%f,transferTimeCPU2GPU:%f,transferTimeGPU2CPU:%f,totalTime:%fsumPerplexity%f\n",WTTime,DTTime,samplingTimeD,samplingTimeS,transferTimeCPU2GPU,transferTimeGPU2CPU,totalTime, document.sumPerplexity);
}
timeRecord.close();
}
#endif
//
//
//volatile __shared__ int p_input[ShaMemSize];
//volatile __shared__ int p_index[ShaMemSize];
//volatile __shared__ int p_value[ShaMemSize];
//volatile __shared__ int p_index_tmp[ShaMemSize];
//volatile __shared__ int p_value_tmp[ShaMemSize];
////volatile __shared__ int p_dense[K];
//int tid = threadIdx.x;
//int globalId = threadIdx.x + blockIdx.x * blockDim.x;
//int blockId = blockIdx.x;
//int indicator = 0;
//int GridDim = gridDim.x;
//
///*int wordIdWT = blockId + (*d_counter_0)*GridDim ;*/
///*long long tokenStart = d_TokenOffset[wordId];
//long long tokenEnd = d_TokenOffset[wordId] + d_TokenCount[wordId];*/
//
//
//
//if ((blockId > (*d_token_amount_0 - 1 - *d_counter_0*gridDim.x)) || (d_slotcount[blockId + (*d_counter_0)*GridDim] == 0))
//{
// return;
//}
//int wordId = blockId + (*d_counter_0)*GridDim;
//p_input[tid] = 0;
//p_index[tid] = 0;
//p_value[tid] = 0;
//p_index_tmp[tid] = 0;
//p_value_tmp[tid] = 0;
//for (int k = tid; k < K; k += blockDim.x)
//{
// d_dense[k + K*blockId] = 0;
//}
//
//__syncthreads();
//
//for (int i = tid; i < ((d_slotcount[wordId] - 1) / blockDim.x + 1)*blockDim.x; i += blockDim.x) {
// if (i < d_slotcount[wordId]) {
// int tmpIndex = d_slotoffset[wordId] + i + numOfTokenD;
// p_input[tid] = d_a[tmpIndex];
// //atomicAdd(&d_row_sum[p_input[tid] - 1], 1);
// }
//
// __syncthreads();
// radix_sort(p_input);
// __syncthreads();
// index_value_count(p_input, p_index, p_value);
// __syncthreads();
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid<(blockDim.x - 1)))
// {
// p_index_tmp[tid] = p_index[tid + 1];
// p_value_tmp[tid] = p_value[tid + 1];
// }
// __syncthreads();
//
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid<(blockDim.x - 1)))
// {
// p_index[tid] = p_index_tmp[tid];
// p_value[tid] = p_value_tmp[tid];
// }
// __syncthreads();
//
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid == (blockDim.x - 1)))
// {
// p_index[tid] = 0;
// p_value[tid] = 0;
// }
// __syncthreads();
// if (p_index[tid])
// {
// //atomicAdd(&p_dense[p_index[tid] - 1], 1);
// d_dense[p_index[tid] - 1 + K*blockId] += p_value[tid];
// }
// __syncthreads();
// p_index[tid] = 0;
// p_value[tid] = 0;
// p_input[tid] = 0;
// p_index_tmp[tid] = 0;
// p_index_tmp[tid] = 0;
// indicator++;
// __syncthreads();
//}
//__syncthreads();
///*if (globalId == 0) printf("%d mark\n", *d_counter_0);
//__syncthreads();*/
//dense_sparse_kernel(d_dense, d_index, d_value, d_count, d_slotcount, d_slotoffset, d_counter_0);
//__syncthreads();
//
//
//
| 4915883b6e2607cbe6937b9c63fb3154a278645b.cu |
#ifndef _MAINKERNEL_H_
#define _MAINKERNEL_H_
#include "DTUpdateKernel.cuh"
#include "WTUpdateKernel.cuh"
#include "WTDenUpdateKernel.cuh"
#include "WTAddKernel.cuh"
#include "SamplingKernel.cuh"
#include "WTDense.cuh"
using namespace std;
void fileErrorCheck(ifstream& argFileStream, string folderName)
{
if (!argFileStream.is_open())
{
cout << "File " << folderName << " open failed" << endl;
exit(0);
}
}
int main(int argc, char *argv[]) {
clock_t startTime, startTime1,endTime;
double transferTimeCPU2GPU=0.0;
double transferTimeGPU2CPU=0.0;
double WTTime=0.0;
double samplingTimeD=0.0;
double samplingTimeS=0.0;
double DTTime=0.0;
double totalTime=0.0;
int maxTLLength;
int maxDTLength;
int maxWTLength;
int maxDocLength;
int wordLength;
int maxChunkWTLength;
int numOfWordD;
int numOfWordS;
int numChunks = 4;
int numIters = 200;
string chunkFilePrefix ="/gpfs/alpine/proj-shared/csc289/lda/datasets/nytimes";
ifstream lengthVec((chunkFilePrefix + string("/lengthVec.txt")).c_str(), ios::binary);//store max Doc and DT length
ofstream timeRecord((chunkFilePrefix + string("/timeRecord.txt")).c_str(), ios::binary);
ofstream SamplingDRecord((chunkFilePrefix + string("/SamplingDRecord.txt")).c_str(), ios::binary);
fileErrorCheck(lengthVec, "/lengthVec.txt");
lengthVec >> maxTLLength >> maxDTLength >> maxWTLength >> maxDocLength >> wordLength>>maxChunkWTLength>> numOfWordD>> numOfWordS;
lengthVec.close();
Document document(chunkFilePrefix,numChunks,maxTLLength,maxDocLength,wordLength);
document.loadDocument();
document.GPUMemAllocate();
DTChunk chunkDT(maxDTLength,maxDocLength,numChunks);
chunkDT.loadDocDTLength(chunkFilePrefix);
chunkDT.CPUMemSet();
chunkDT.GPUMemAllocate();
chunkDT.loadDTCountOffset(chunkFilePrefix);
WTD WTDen(numOfWordD, wordLength);
WTDen.GPUMemAllocate();
WTDen.GPUMemInit();
WTAll WT(maxWTLength, wordLength, numChunks, maxChunkWTLength,numOfWordS);
WT.CPUMemSet();
WT.GPUMemAllocate();
WT.GPUMemset();
WT.loadWTLength(chunkFilePrefix);
WT.loadWTCountOffset(chunkFilePrefix);
WT.blockWarpCountCPU2GPU();
WT.CPU2GPUCountOffset();
srand(time(NULL));
//curandState* randState[2];
//srand(time(NULL));
//for (int i = 0; i < 2; i++) {
// cudaSetDevice(i);
// cudaMalloc(&randState[i], sizeof(curandState)*GridDim*BlockDim);//may have bugs
//}
//H_ERR(cudaDeviceSynchronize());
curandState* randState;
cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim);
H_ERR(cudaDeviceSynchronize());
printf("Total memory usage : %f GB\n", document.TLMemory + WT.WTMemory + chunkDT.DTMemory);
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
document.CPU2GPU(chunkId);
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
//--------------update WTDen matrix ---------
UpdateWTDenKernel(WTDen, WT, document, chunkId);
//--------------update WTDen matrix-----------
//--------------update WT matrix--------
//WT.chunkCPU2GPUCountOffset(chunkId);
//WT.chunkGPUMemset();
UpdateWTKernel(WT, document,chunkId);
WT.chunkWTGPU2CPU(chunkId);// marker
//
//WT.CPU2DiskChunk(chunkFilePrefix, chunkId);// marker
/*printf("\n what's this %d\n", chunkId);*/
//--------------update WT matrix-----------
}
/*WTDen.WTDenGPU2CPU();
WTDen.WTDenCPU2Disk(chunkFilePrefix);*/
printf("WT ended!\n");
//WT.CPU2GPUCountOffset();
startTime = clock();
for (int iter = 0; iter < numIters; iter++) {
startTime1=clock();
//printf("chunk WT updated!\n");
WT.GPUMemset();
//--------------update WTDenSum -----------
UpdateWTDenRowSumKernel(WTDen,WT);
//--------------update WTDenSum -----------
//--------------update WTSum -----------
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
WT.chunkWTCPU2GPU(chunkId);
WTAdditionKernel(WT, document);
}
//--------------update WTSum -----------
//WT.WTGPU2CPU();// marker
//WT.CPU2Disk(chunkFilePrefix);// marker
printf("WT updated!\n");
endTime = clock();
WTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
document.CPU2GPUPerplexity();
samplingTimeD=0;
for (int chunkId = 0; chunkId < numChunks; chunkId++) {
// startTime1=clock();
printf("step: %d\n",chunkId);
//--------------update DT matrix-----------
startTime1=clock();
document.CPU2GPU(chunkId);
endTime = clock();
transferTimeCPU2GPU+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
printf("%d\n", 1);
chunkDT.GPUMemSet(chunkId);
printf("%d\n", 2);
chunkDT.CPU2GPUDTCountOffset(chunkId);
printf("%d\n", 3);
//chunkDT.CPU2GPU(chunkId, document.docLengthVec[chunkId]);
startTime1=clock();
UpdateDTKernel(chunkDT, document);
endTime = clock();
printf("%d\n", 4);
//chunkDT.GPU2CPU(chunkId);
//chunkDT.CPU2Disk(chunkFilePrefix, chunkId);// marker
//--------------update DT matrix-----------
// endTime = clock();
printf("%d\n", 5);
DTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
//--------------sampling-----------
printf("%d\n", 6);
SampleKernelD(WTDen, WT, chunkDT, document, randState);
endTime = clock();
samplingTimeD+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
printf("%d\n", 7);
//WTDen.WTDenGPU2CPU();// marker
//WTDen.WTDenCPU2Disk(chunkFilePrefix);// marker
startTime1=clock();
SampleKernel(WT, chunkDT, document, randState);
printf("%d\n", 8);
endTime = clock();
//WT.WTGPU2CPU();// marker
//WT.CPU2Disk(chunkFilePrefix);// marker
//--------------sampling-----------
// endTime = clock();
samplingTimeS+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
document.GPU2CPU(chunkId);
endTime = clock();
transferTimeGPU2CPU+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
startTime1=clock();
//--------------update chunkWT matrix-----------
WT.chunkCPU2GPUCountOffset(chunkId);
WT.chunkGPUMemset();
UpdateWTKernel(WT, document, chunkId);
WT.chunkWTGPU2CPU(chunkId);
//WT.CPU2DiskChunk(chunkFilePrefix, chunkId);
//--------------update chunkWT matrix-----------
endTime = clock();
WTTime+=(double)(endTime-startTime1)/CLOCKS_PER_SEC;
}
WTDen.GPUMemCopy();
WTDen.GPUMemset();
PerplexityKernel(document);
printf("done!!!!!");
/*document.GPU2CPUPerplexity();*/
// document.CPU2DiskPerplexity(chunkFilePrefix);
endTime = clock();
totalTime=(double)(endTime-startTime)/CLOCKS_PER_SEC;
timeRecord << WTTime << " " << DTTime << " " << samplingTimeD << " " << samplingTimeS << " " << transferTimeCPU2GPU << " " <<transferTimeGPU2CPU << " " <<totalTime << " " << document.sumPerplexity<< "\n";
SamplingDRecord << samplingTimeD << "\n";
printf("WTTime: %f, DTTime: %f, samplingTimeD:%f, samplingTimeS:%f,transferTimeCPU2GPU:%f,transferTimeGPU2CPU:%f,totalTime:%f,sumPerplexity:%f\n",WTTime,DTTime,samplingTimeD,samplingTimeS,transferTimeCPU2GPU,transferTimeGPU2CPU,totalTime, document.sumPerplexity);
}
timeRecord.close();
}
#endif
//
//
//volatile __shared__ int p_input[ShaMemSize];
//volatile __shared__ int p_index[ShaMemSize];
//volatile __shared__ int p_value[ShaMemSize];
//volatile __shared__ int p_index_tmp[ShaMemSize];
//volatile __shared__ int p_value_tmp[ShaMemSize];
////volatile __shared__ int p_dense[K];
//int tid = threadIdx.x;
//int globalId = threadIdx.x + blockIdx.x * blockDim.x;
//int blockId = blockIdx.x;
//int indicator = 0;
//int GridDim = gridDim.x;
//
///*int wordIdWT = blockId + (*d_counter_0)*GridDim ;*/
///*long long tokenStart = d_TokenOffset[wordId];
//long long tokenEnd = d_TokenOffset[wordId] + d_TokenCount[wordId];*/
//
//
//
//if ((blockId > (*d_token_amount_0 - 1 - *d_counter_0*gridDim.x)) || (d_slotcount[blockId + (*d_counter_0)*GridDim] == 0))
//{
// return;
//}
//int wordId = blockId + (*d_counter_0)*GridDim;
//p_input[tid] = 0;
//p_index[tid] = 0;
//p_value[tid] = 0;
//p_index_tmp[tid] = 0;
//p_value_tmp[tid] = 0;
//for (int k = tid; k < K; k += blockDim.x)
//{
// d_dense[k + K*blockId] = 0;
//}
//
//__syncthreads();
//
//for (int i = tid; i < ((d_slotcount[wordId] - 1) / blockDim.x + 1)*blockDim.x; i += blockDim.x) {
// if (i < d_slotcount[wordId]) {
// int tmpIndex = d_slotoffset[wordId] + i + numOfTokenD;
// p_input[tid] = d_a[tmpIndex];
// //atomicAdd(&d_row_sum[p_input[tid] - 1], 1);
// }
//
// __syncthreads();
// radix_sort(p_input);
// __syncthreads();
// index_value_count(p_input, p_index, p_value);
// __syncthreads();
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid<(blockDim.x - 1)))
// {
// p_index_tmp[tid] = p_index[tid + 1];
// p_value_tmp[tid] = p_value[tid + 1];
// }
// __syncthreads();
//
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid<(blockDim.x - 1)))
// {
// p_index[tid] = p_index_tmp[tid];
// p_value[tid] = p_value_tmp[tid];
// }
// __syncthreads();
//
// if (((d_slotcount[wordId] - indicator*blockDim.x) < blockDim.x) && (tid == (blockDim.x - 1)))
// {
// p_index[tid] = 0;
// p_value[tid] = 0;
// }
// __syncthreads();
// if (p_index[tid])
// {
// //atomicAdd(&p_dense[p_index[tid] - 1], 1);
// d_dense[p_index[tid] - 1 + K*blockId] += p_value[tid];
// }
// __syncthreads();
// p_index[tid] = 0;
// p_value[tid] = 0;
// p_input[tid] = 0;
// p_index_tmp[tid] = 0;
// p_index_tmp[tid] = 0;
// indicator++;
// __syncthreads();
//}
//__syncthreads();
///*if (globalId == 0) printf("%d mark\n", *d_counter_0);
//__syncthreads();*/
//dense_sparse_kernel(d_dense, d_index, d_value, d_count, d_slotcount, d_slotoffset, d_counter_0);
//__syncthreads();
//
//
//
|
4abf44326ee2f458f57a614ed8db4236a1f9a59a.hip | // !!! This is a file automatically generated by hipify!!!
#include "vars.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
inline void checkCuda(hipError_t result, const char *file, const int line, bool fatal=false) {
if (result != hipSuccess) {
fprintf(stderr, "%s:%d: CUDA Runtime Error %d: %s\n", file, line, int(result),
hipGetErrorString(result));\
if (fatal) {
exit(EXIT_FAILURE);
}
}
}
#define OR_FATAL(stmt) checkCuda(stmt, __FILE__, __LINE__, true)
extern int neuron;
extern int layer;
extern int batch;
extern int input;
extern float bias;
extern Duration timekernel;
extern int **csrdispl;
extern INDPREC **csrindex;
extern VALPREC **csrvalue;
extern FEATPREC *currfeat;
extern FEATPREC *nextfeat;
extern FEATPREC *nextfeat_tmp;
extern int *active;
extern int *categories;
extern int *globalcategories;
extern int myid;
extern int numproc;
extern int numthreads;
extern int *numbatch;
extern int *batchdispl;
extern int mybatch;
extern double timebalance;
extern double timecopy;
int **csrdispl_d;
INDPREC *indbuff_d;
VALPREC *valbuff_d;
#ifdef OUTOFCORE
int weightsizemax;
#ifdef OVERLAP
INDPREC *indstream_d;
VALPREC *valstream_d;
#endif
#else
INDPREC **csrindex_d;
VALPREC **csrvalue_d;
#endif
FEATPREC *currfeat_d;
FEATPREC *nextfeat_d;
int *active_d;
int *categories_d;
int blocksize;
int numblocks;
int numwarp;
int buffsize;
#ifdef BALANCE
int numfeature;
FEATPREC *sendbuff;
FEATPREC *recvbuff;
MPI_Request *catrecvrequests;
MPI_Request *catsendrequests;
MPI_Request *featrecvrequests;
MPI_Request *featsendrequests;
#endif
hipEvent_t copystart, copystop;
hipEvent_t kernelstart, kernelstop;
hipStream_t copystream;
hipStream_t kernelstream;
float elapsedTime;
__device__ __forceinline__ float __ReLU(float x){
return x < 0.0 ? 0.0 : x > 32.0 ? 32.0 : x;
};
float ReLU(float x){
return x<0.0?0.0:x>32.0?32.0:x;
};
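// CPU reference for one sparse layer: for each output neuron (CSR column) it
// accumulates wvalue[idx] * currfeat[windex[idx]*mybatch + b] over the column's
// nonzeros, applies ReLU(x + bias), and counts active batch items. Features are
// stored neuron-major: element (neuron j, batch item b) lives at feat[j*mybatch + b].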
void kernel_serial(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active) {
int weight_block_size = 4;
int batch_per_round = 1;
printf("run kernel\n");
for(int weight_block = 0; weight_block < neuron / weight_block_size; ++weight_block) { // this loop is split across different SMs
int current_block_index_begin = wdispl[weight_block * weight_block_size];
int current_block_index_end = wdispl[(weight_block + 1) * (weight_block_size)];
int current_block_index_size = current_block_index_end - current_block_index_begin;
//printf("run kernel SM %d %d %d %d\n", weight_block, current_block_index_begin, current_block_index_end, current_block_index_size);
float shared [current_block_index_size] = {0}; // fixed-length stand-in for shared memory
for(int idx = 0; idx < current_block_index_size; ++idx) {
shared[idx] = wvalue[current_block_index_begin + idx];
}
// synchronization point: writes to shared memory must finish before the reads below
for(int batch_idx = 0; batch_idx < (mybatch + batch_per_round - 1) / batch_per_round; ++batch_idx) { // the batch is processed in segments
for(int col = weight_block * weight_block_size; col < weight_block * weight_block_size + weight_block_size; ++ col) { // thread y
// maximum number of threads: batch_block_num * weight_per_block * reg_num;
// warp task, split across reg_num * batch_block_num threads
float reg [batch_per_round] = {0}; // accesses over the batch are contiguous, giving good spatial locality
for(int idx = wdispl[col]; idx < wdispl[col + 1]; ++idx) { // entries with the same idx update the same region
for(int i = 0; i < batch_per_round; ++i) { // thread x
if(batch_idx * batch_per_round + i < mybatch) {
reg[i] += shared[idx - current_block_index_begin] * currfeat[windex[idx] * mybatch + batch_idx * batch_per_round + i];
//printf("%d %f\n", i, reg[i]);
}
}
}
// write back the results for this batch segment
int batch_id_begin = batch_idx * batch_per_round;
for(int i = batch_id_begin; i < batch_id_begin + batch_per_round; ++i) {
if(i < mybatch) {
if(nextfeat[mybatch * col + i] = ReLU(reg[i - batch_id_begin] + bias)) {
active[i] += 1;
}
}
}
}
}
}
}
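// Runs one layer with the serial kernel, then compacts the batch: only batch items
// with at least one nonzero activation are kept, and the feature buffers are
// swapped so the compacted output becomes the next layer's input.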
void infer_serial(int l) {
VALPREC *csr_val = csrvalue[l];
INDPREC *csr_index = csrindex[l];
int *csr_bias = csrdispl[l];
for(int i = 0; i < mybatch; ++i) {
active[i] = 0;
}
kernel_serial(nextfeat, currfeat, csr_bias, csr_index, csr_val, bias, active);
int feature = 0;
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
feature ++;
}
}
for(int j = 0; j < neuron; ++j) {
int curr_feature = 0;
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
nextfeat[j * feature + curr_feature] = nextfeat[j * mybatch + i];
curr_feature++;
}
}
}
mybatch = feature;
FEATPREC *tempfeat = currfeat;
currfeat = nextfeat;
nextfeat = tempfeat;
printf("real count = %d\n", feature);
}
__device__ inline void __float4Timesfloat(float4 a, float b, float4& c) {
c.x += a.x * b;
c.y += a.y * b;
c.z += a.z * b;
c.w += a.w * b;
}
__device__ inline void __float4AddfloatReLU(float4 a, float b, float4& c) {
c.x = __ReLU(a.x + b);
c.y = __ReLU(a.y + b);
c.z = __ReLU(a.z + b);
c.w = __ReLU(a.w + b);
}
__device__ inline void __float2Timesfloat(float2 a, float b, float2& c) {
c.x += a.x * b;
c.y += a.y * b;
}
__device__ inline void __float2AddfloatReLU(float2 a, float b, float2& c) {
c.x = __ReLU(a.x + b);
c.y = __ReLU(a.y + b);
}
template <typename T>
__device__ __forceinline__ T Load(const T* address) {
return __ldg(address);
}
inline __host__ __device__ int4 operator*(int4 a, int b)
{
return make_int4(a.x * b, a.y * b, a.z * b, a.w * b);
}
__device__ __forceinline__ void FMA(float x1, float x2, float* out) {
out[0] += x1 * x2;
}
static __device__ __forceinline__ void FMA(float4 x2, float x1, float4 *out) {
out[0].x += x1 * x2.x;
out[0].y += x1 * x2.y;
out[0].z += x1 * x2.z;
out[0].w += x1 * x2.w;
}
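// Sputnik-style SpMM tile kernel (not launched by infer_gpu below, which uses
// sputnik_kernel2). Written for an 8x4 thread block: each block stages 4 CSR
// columns of 32 nonzeros in shared memory, gathers the matching dense feature
// rows as float4s, and each thread accumulates 4 output features before applying
// ReLU(x + bias) and setting the active flags.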
__global__ void sputnik_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
const int col_index = blockIdx.x * 4 + threadIdx.y; // each block handles 4 columns; each thread handles one column
const int feat_index = blockIdx.y * 32; // each block handles 32 features
if(feat_index > mybatch) return;
const int row_offset = Load(wdispl + col_index); // start of this column's nonzeros
const int nnzs = Load(wdispl + col_index + 1) - row_offset; // always equals 32
//-------------
const int sparse_tile_size = 4 * 32 / 4; // 4 columns, 32 values each, packed 4 per vector
__shared__ int4 column_indices_tile_array[sparse_tile_size]; // stores the column indices that are needed
__shared__ float4 values_tile_array[sparse_tile_size]; // stores the values that are needed
float4* values_tile = values_tile_array + 32 / 4 * threadIdx.y; // start of the column this thread handles, 8 float4s per column
int4* column_indices_tile = column_indices_tile_array + 32 / 4 * threadIdx.y;
float4* sparse_values_p = reinterpret_cast<float4*>(wvalue + row_offset) + threadIdx.x; // per-thread source pointer for the load (original data location)
int4* sparse_indexs_p = reinterpret_cast<int4*>(windex + row_offset) + threadIdx.x;
float4 *sparse_values_pp = sparse_values_p;
int4* sparse_indexs_pp = sparse_indexs_p;
float4* values_tile_p = values_tile + threadIdx.x; // per-thread destination pointer for the load (tile location)
int4* column_indices_tile_p = column_indices_tile + threadIdx.x;
//-------------
const int dense_tile_size = 32 * 32 / 8 / 4; // each block covers 32 positions of the feature tile
__align__(16) float4 dense_matrix_tile_array[dense_tile_size]; // each thread handles 4 features
float4* dense_tile_array = reinterpret_cast<float4*>(dense_matrix_tile_array); // alias pointer
float4* dense_value_p = reinterpret_cast<float4*>(currfeat + feat_index) + threadIdx.x; // start of the source data
//-------------
const int output_tile_size = 32 / 8; // each thread produces 4 results
__align__(16) float output_matrix_tile_array[output_tile_size] = {0};
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
*(values_tile_p) = Load(sparse_values_pp);
*(column_indices_tile_p) = Load(sparse_indexs_pp) * (mybatch / 4);
sparse_values_pp += blockDim.x;
sparse_indexs_pp += blockDim.x;
values_tile_p += blockDim.x;
column_indices_tile_p += blockDim.x;
}
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / 4; ++i) { // 32 entries are loaded per feature
int* col_offset = reinterpret_cast<int*>(column_indices_tile + i);
for(int k = 0; k < 4; ++k) { // load the dense data matching the sparse positions
int offset = col_offset[k];
float4* dense_value_pp = dense_value_p + offset; // offset skips that many features
for(int j = 0; j < 32 / blockDim.x / 4; ++j) {
int off = (i * 4 + k) * (32 / blockDim.x / 4) + j;
dense_tile_array[off] = Load(dense_value_pp); // each entry loads 4 contiguously stored features
dense_value_pp += blockDim.x;
}
}
}
float* sparse_value = reinterpret_cast<float*>(values_tile); // start the MAC loop over the column to be processed
for(int i = 0; i < 32; ++i) { // 32 elements per column
float* dense_value = reinterpret_cast<float*>(dense_tile_array + i); // bug here!!! simplistic version
#pragma unroll
for(int k = 0; k < 4; ++k) { // each element is combined with 4 features
#pragma unroll
for(int j = 0; j < 32 / blockDim.x / 4; ++j) {
float* outputs = output_matrix_tile_array + j * 4 + k; // maybe bug here!!!
FMA(dense_value[k], sparse_value[i], outputs);
}
}
}
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
for(int j = 0; j < 4; ++j) {
if(nextfeat[col_index * mybatch + feat_index + threadIdx.x * 4 + j] = __ReLU(output_matrix_tile_array[i * 4 + j] + bias)) {
active[feat_index + threadIdx.x * 4 + j] = 1; // bug here!!!
}
}
}
}
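// Variant actually launched by infer_gpu with block(8, 32) and grid(neuron/32,
// mybatch/64 + 1): each block stages 32 CSR columns of 32 nonzeros in shared
// memory and reuses them for two 32-wide feature tiles (the f loop), with each
// thread accumulating 4 output features.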
__global__ void sputnik_kernel2(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
const int col_index = blockIdx.x * 32 + threadIdx.y; // each block handles 32 columns; each thread handles one column
const int row_offset = Load(wdispl + col_index); // start of this column's nonzeros
const int nnzs = Load(wdispl + col_index + 1) - row_offset; // always equals 32
//-------------
const int sparse_tile_size = 32 * 32; // 32 columns, 32 values each
__shared__ int column_indices_tile_array[sparse_tile_size]; // stores the column indices that are needed
__shared__ float values_tile_array[sparse_tile_size]; // stores the values that are needed
float* values_tile = values_tile_array + 32 * threadIdx.y; // start of the column this thread handles
int* column_indices_tile = column_indices_tile_array + 32 * threadIdx.y;
float4* sparse_values_p = reinterpret_cast<float4*>(wvalue + row_offset) + threadIdx.x; // per-thread source pointer for the load (original data location)
int4* sparse_indexs_p = reinterpret_cast<int4*>(windex + row_offset) + threadIdx.x;
float4 *sparse_values_pp = sparse_values_p;
int4* sparse_indexs_pp = sparse_indexs_p;
float4* values_tile_p = reinterpret_cast<float4*>(values_tile) + threadIdx.x; // per-thread destination pointer for the load (tile location)
int4* column_indices_tile_p = reinterpret_cast<int4*>(column_indices_tile) + threadIdx.x;
//-------------
const int dense_tile_size = 32 * 32 / 8; // each block covers 32 positions of the feature tile
__align__(16) float dense_matrix_tile_array[dense_tile_size]; // each thread handles 4 features
float4* dense_tile_array = reinterpret_cast<float4*>(dense_matrix_tile_array); // alias pointer
//-------------
const int output_tile_size = 32 / 8; // each thread produces 4 results
__align__(16) float output_matrix_tile_array[output_tile_size] = {0};
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
*(values_tile_p) = Load(sparse_values_pp);
*(column_indices_tile_p) = Load(sparse_indexs_pp) * (mybatch / 4);
sparse_values_pp += blockDim.x;
sparse_indexs_pp += blockDim.x;
values_tile_p += blockDim.x;
column_indices_tile_p += blockDim.x;
}
#pragma unroll
for(int f = 0; f < 2; ++f) {
int feat_index = blockIdx.y * 32 * 2 + f * 32;
if(feat_index > mybatch) return;
__syncthreads();
float4* dense_value_p = reinterpret_cast<float4*>(currfeat + feat_index) + threadIdx.x; // start of the source data
#pragma unroll
for(int i = 0; i < 32; ++i) { // 32 entries are loaded per feature
int* col_offset = reinterpret_cast<int*>(column_indices_tile + i);
#pragma unroll
for(int k = 0; k < 1; ++k) { // load the dense data matching the sparse positions
float4* dense_value_pp = reinterpret_cast<float4*>(dense_value_p + col_offset[k]); // offset skips that many features
#pragma unroll
for(int j = 0; j < 1; ++j) {
int off = (i * 1 * 1) + k * 1 + j;
dense_tile_array[off] = Load(dense_value_pp); // each entry loads 4 contiguously stored features
dense_value_pp += blockDim.x;
}
}
}
float* sparse_value = reinterpret_cast<float*>(values_tile); // start the MAC loop over the column to be processed
float4* dense_value = reinterpret_cast<float4*>(dense_tile_array); // bug here!!! simplistic version
#pragma unroll
for(int i = 0; i < 32; ++i) { // 32 elements per column
float* lhs_values = (sparse_value + i);
#pragma unroll
for(int k = 0; k < 1; ++k) { // each element is combined with 4 features
#pragma unroll
for(int j = 0; j < 1; ++j) {
float4* outputs = reinterpret_cast<float4*>(output_matrix_tile_array + j * 4 * 1); // maybe bug here!!!
int rhs_offset = j * 1 * 1 + k * 1 + i;
FMA(dense_value[rhs_offset], lhs_values[k], outputs);
}
}
}
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
#pragma unroll
for(int j = 0; j < 4; ++j) {
if(
nextfeat[col_index * mybatch + feat_index + threadIdx.x * 4 + j] = __ReLU(output_matrix_tile_array[i * 4 + j] + bias)
){
active[feat_index + threadIdx.x * 4 + j] = 1; // bug here!!!
}
output_matrix_tile_array[i * 4 + j] = 0;
}
}
}
}
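// Alternative dense-feature kernel (not launched by infer_gpu): one warp per
// output column, dynamic shared memory split between the column's values and
// indices, each lane accumulating a float4 (4 batch items) per 128-item round.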
__global__ void dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
extern __shared__ float shared[];
const int weight_block_size = blockDim.x / 32;
const int batch_per_round = 32 * 4;
const int weight_block = blockIdx.x;
const int current_block_index_begin = wdispl[weight_block * weight_block_size];
const int current_block_index_end = wdispl[(weight_block + 1) * (weight_block_size)];
const int current_block_index_size = current_block_index_end - current_block_index_begin;
int* shared_index = (int*)(shared + 32 * weight_block_size);
for(int idx = threadIdx.x; idx < current_block_index_size; idx += blockDim.x) {
shared[idx] = wvalue[current_block_index_begin + idx];
shared_index[idx] = windex[current_block_index_begin + idx];
}
__syncthreads();
int col = threadIdx.x / 32 + blockIdx.x * weight_block_size;
int thread_idx = threadIdx.x % 32;
for(int batch_idx = blockIdx.y * mybatch / (batch_per_round) / gridDim.y;
batch_idx < (blockIdx.y + 1) * (mybatch + batch_per_round - 1) / (batch_per_round) / gridDim.y;
batch_idx += 1) { // unroll this loop
float4 reg[1] = {0};
for(int idx = wdispl[col]; idx < wdispl[col + 1]; idx += 4) { // entries with the same idx update the same region
float4 weight_reg = reinterpret_cast<float4*>(shared)[(idx - current_block_index_begin) / 4];
int4 weight_idx_reg = reinterpret_cast<int4*>(shared_index)[(idx - current_block_index_begin) / 4];
if((batch_idx + 0) * batch_per_round + thread_idx * 4 + 1 < mybatch) {
for(int unroll = 0; unroll < 1; ++unroll) {
int feature_id = (batch_idx + unroll) * batch_per_round;
auto feat_p0 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.x * mybatch + feature_id);
auto feat_p1 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.y * mybatch + feature_id);
auto feat_p2 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.z * mybatch + feature_id);
auto feat_p3 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.w * mybatch + feature_id);
//__float4Timesfloat(feat_p[thread_idx], shared[idx - current_block_index_begin], reg);
__float4Timesfloat(feat_p0[thread_idx], weight_reg.x, reg[unroll]);
__float4Timesfloat(feat_p1[thread_idx], weight_reg.y, reg[unroll]);
__float4Timesfloat(feat_p2[thread_idx], weight_reg.z, reg[unroll]);
__float4Timesfloat(feat_p3[thread_idx], weight_reg.w, reg[unroll]);
}
}
}
// write back the results for this batch segment
if((batch_idx + 0) * batch_per_round + thread_idx * 4 + 1 < mybatch) {
for(int unroll = 0; unroll < 1; ++unroll) {
int feature_id = (batch_idx + unroll) * batch_per_round;
auto feat_p = reinterpret_cast<float4*>(nextfeat + mybatch * col + feature_id);
__float4AddfloatReLU(reg[unroll], bias, feat_p[thread_idx]);
if(feat_p[thread_idx].x) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4] = 1;
}
if(feat_p[thread_idx].y) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 1] = 1;
}
if(feat_p[thread_idx].z) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 2] = 1;
}
if(feat_p[thread_idx].w) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 3] = 1;
}
}
}
}
};
void setup_gpu(){
mybatch = batch;
hipSetDevice(myid % 2); // two GPUs per node
hipFuncSetAttribute(dummy_kernel,hipFuncAttributeMaxDynamicSharedMemorySize,98304);
OR_FATAL(hipEventCreate(&kernelstart));
OR_FATAL(hipEventCreate(&kernelstop));
OR_FATAL(hipEventCreate(©start));
OR_FATAL(hipEventCreate(©stop));
OR_FATAL(hipStreamCreate(©stream));
OR_FATAL(hipStreamCreate(&kernelstream));
OR_FATAL(hipHostMalloc((void**)&active,sizeof(int)*mybatch));
OR_FATAL(hipMalloc((void**)&active_d,sizeof(int)*mybatch));
for(int k = 0; k < mybatch; k++){
active[k] = neuron;
}
OR_FATAL(hipMemset(active_d,0,sizeof(int)*mybatch));
csrdispl_d = new int*[layer];
csrindex_d = new INDPREC*[layer];
csrvalue_d = new VALPREC*[layer];
for(int l = 0; l < layer; l++){
OR_FATAL(hipMalloc((void**)&csrdispl_d[l], sizeof(int) * (neuron+1)));
OR_FATAL(hipMemcpy(csrdispl_d[l], csrdispl[l], sizeof(int) * (neuron+1), hipMemcpyHostToDevice));
OR_FATAL(hipMalloc((void**)&csrindex_d[l],sizeof(INDPREC) * csrdispl[l][neuron]));
OR_FATAL(hipMalloc((void**)&csrvalue_d[l],sizeof(VALPREC) * csrdispl[l][neuron]));
OR_FATAL(hipMemcpy(csrindex_d[l], csrindex[l], sizeof(INDPREC) * csrdispl[l][neuron], hipMemcpyHostToDevice));
OR_FATAL(hipMemcpy(csrvalue_d[l], csrvalue[l], sizeof(VALPREC) * csrdispl[l][neuron], hipMemcpyHostToDevice));
}
OR_FATAL(hipMalloc((void**)&indbuff_d,sizeof(INDPREC)* neuron * 32));
OR_FATAL(hipMalloc((void**)&valbuff_d,sizeof(VALPREC)* neuron * 32));
OR_FATAL(hipMalloc((void**)&currfeat_d, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(hipMalloc((void**)&nextfeat_d, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(hipMemset(currfeat_d, 0, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(hipMemset(nextfeat_d, 0, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(hipMemcpy(currfeat_d, currfeat, sizeof(FEATPREC) * mybatch * neuron, hipMemcpyHostToDevice));
}
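// Runs one layer on the GPU: launches sputnik_kernel2, copies the active flags back,
// and if any batch item went inactive, compacts the feature matrix on the host
// (padding the batch count up to a multiple of 4 for the float4 loads) before
// copying it back and swapping the feature buffers.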
void infer_gpu(int l){
int weight_block_size = 1;
int batch_per_round = 32;
int batch_block_size = mybatch / (batch_per_round * 4) + 1;
int shared_memory_size = 32 * weight_block_size * sizeof(float);
dim3 block(8, 32);
dim3 grid(neuron / 32, (mybatch / (32 * 2) + 1));
indbuff_d = csrindex_d[l];
valbuff_d = csrvalue_d[l];
auto startkernel = sc::now();
OR_FATAL(hipMemsetAsync(active_d, 0, sizeof(int) * mybatch, kernelstream));
OR_FATAL(hipEventRecord(kernelstart,kernelstream));
hipLaunchKernelGGL(( sputnik_kernel2), dim3(grid), dim3(block), 0, kernelstream, nextfeat_d, currfeat_d, csrdispl_d[l], indbuff_d, valbuff_d,bias, active_d, mybatch);
OR_FATAL(hipEventRecord(kernelstop,kernelstream));
OR_FATAL(hipMemcpyAsync(active, active_d, sizeof(int) * mybatch, hipMemcpyDeviceToHost, kernelstream));
OR_FATAL(hipStreamSynchronize(kernelstream));
timekernel += sc::now() - startkernel;
int feature = 0;
// for(int i = 0; i < mybatch; ++i) {
// active[i] = 0;
// }
// OR_FATAL(hipMemcpy(nextfeat, nextfeat_d, neuron * mybatch * sizeof(float), hipMemcpyDeviceToHost));
// for(int i = 0; i < mybatch; ++i) {
// for(int j = 0; j < neuron; ++j) {
// if(nextfeat[j * mybatch + i]) {
// active[i] = 1;
// }
// }
// }
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
feature ++;
}
}
int alignment_bias = (feature % 4 == 0) ? 0 : (4 - feature % 4);
feature += alignment_bias;
if(feature != mybatch) {
OR_FATAL(hipMemcpy(nextfeat, nextfeat_d, neuron * mybatch * sizeof(float), hipMemcpyDeviceToHost));
for(int j = 0; j < neuron; ++j) {
int curr_feature = 0;
for(int i = 0; i < mybatch + alignment_bias; ++i) {
if(i < mybatch && active[i]) {
nextfeat_tmp[j * feature + curr_feature] = nextfeat[j * mybatch + i];
curr_feature++;
}
if(i >= mybatch) {
nextfeat_tmp[j * feature + curr_feature] = 0;
curr_feature++;
}
}
}
OR_FATAL(hipMemcpy(nextfeat_d, nextfeat_tmp, neuron * feature * sizeof(float), hipMemcpyHostToDevice));
}
mybatch = feature;
FEATPREC *tempfeat_d = currfeat_d;
currfeat_d = nextfeat_d;
nextfeat_d = tempfeat_d;
printf("real count = %d\n", feature);
}; | 4abf44326ee2f458f57a614ed8db4236a1f9a59a.cu | #include "vars.h"
#include <cuda.h>
#include <cuda_runtime.h>
inline void checkCuda(cudaError_t result, const char *file, const int line, bool fatal=false) {
if (result != cudaSuccess) {
fprintf(stderr, "%s:%d: CUDA Runtime Error %d: %s\n", file, line, int(result),
cudaGetErrorString(result));\
if (fatal) {
exit(EXIT_FAILURE);
}
}
}
#define OR_FATAL(stmt) checkCuda(stmt, __FILE__, __LINE__, true)
extern int neuron;
extern int layer;
extern int batch;
extern int input;
extern float bias;
extern Duration timekernel;
extern int **csrdispl;
extern INDPREC **csrindex;
extern VALPREC **csrvalue;
extern FEATPREC *currfeat;
extern FEATPREC *nextfeat;
extern FEATPREC *nextfeat_tmp;
extern int *active;
extern int *categories;
extern int *globalcategories;
extern int myid;
extern int numproc;
extern int numthreads;
extern int *numbatch;
extern int *batchdispl;
extern int mybatch;
extern double timebalance;
extern double timecopy;
int **csrdispl_d;
INDPREC *indbuff_d;
VALPREC *valbuff_d;
#ifdef OUTOFCORE
int weightsizemax;
#ifdef OVERLAP
INDPREC *indstream_d;
VALPREC *valstream_d;
#endif
#else
INDPREC **csrindex_d;
VALPREC **csrvalue_d;
#endif
FEATPREC *currfeat_d;
FEATPREC *nextfeat_d;
int *active_d;
int *categories_d;
int blocksize;
int numblocks;
int numwarp;
int buffsize;
#ifdef BALANCE
int numfeature;
FEATPREC *sendbuff;
FEATPREC *recvbuff;
MPI_Request *catrecvrequests;
MPI_Request *catsendrequests;
MPI_Request *featrecvrequests;
MPI_Request *featsendrequests;
#endif
cudaEvent_t copystart, copystop;
cudaEvent_t kernelstart, kernelstop;
cudaStream_t copystream;
cudaStream_t kernelstream;
float elapsedTime;
__device__ __forceinline__ float __ReLU(float x){
return x < 0.0 ? 0.0 : x > 32.0 ? 32.0 : x;
};
float ReLU(float x){
return x<0.0?0.0:x>32.0?32.0:x;
};
void kernel_serial(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active) {
int weight_block_size = 4;
int batch_per_round = 1;
printf("run kernel\n");
for(int weight_block = 0; weight_block < neuron / weight_block_size; ++weight_block) { // this loop is split across different SMs
int current_block_index_begin = wdispl[weight_block * weight_block_size];
int current_block_index_end = wdispl[(weight_block + 1) * (weight_block_size)];
int current_block_index_size = current_block_index_end - current_block_index_begin;
//printf("run kernel SM %d %d %d %d\n", weight_block, current_block_index_begin, current_block_index_end, current_block_index_size);
float shared [current_block_index_size] = {0}; // fixed-length stand-in for shared memory
for(int idx = 0; idx < current_block_index_size; ++idx) {
shared[idx] = wvalue[current_block_index_begin + idx];
}
// synchronization point: writes to shared memory must finish before the reads below
for(int batch_idx = 0; batch_idx < (mybatch + batch_per_round - 1) / batch_per_round; ++batch_idx) { // the batch is processed in segments
for(int col = weight_block * weight_block_size; col < weight_block * weight_block_size + weight_block_size; ++ col) { // thread y
// maximum number of threads: batch_block_num * weight_per_block * reg_num;
// warp task, split across reg_num * batch_block_num threads
float reg [batch_per_round] = {0}; // accesses over the batch are contiguous, giving good spatial locality
for(int idx = wdispl[col]; idx < wdispl[col + 1]; ++idx) { // entries with the same idx update the same region
for(int i = 0; i < batch_per_round; ++i) { // thread x
if(batch_idx * batch_per_round + i < mybatch) {
reg[i] += shared[idx - current_block_index_begin] * currfeat[windex[idx] * mybatch + batch_idx * batch_per_round + i];
//printf("%d %f\n", i, reg[i]);
}
}
}
// write back the results for this batch segment
int batch_id_begin = batch_idx * batch_per_round;
for(int i = batch_id_begin; i < batch_id_begin + batch_per_round; ++i) {
if(i < mybatch) {
if(nextfeat[mybatch * col + i] = ReLU(reg[i - batch_id_begin] + bias)) {
active[i] += 1;
}
}
}
}
}
}
}
void infer_serial(int l) {
VALPREC *csr_val = csrvalue[l];
INDPREC *csr_index = csrindex[l];
int *csr_bias = csrdispl[l];
for(int i = 0; i < mybatch; ++i) {
active[i] = 0;
}
kernel_serial(nextfeat, currfeat, csr_bias, csr_index, csr_val, bias, active);
int feature = 0;
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
feature ++;
}
}
for(int j = 0; j < neuron; ++j) {
int curr_feature = 0;
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
nextfeat[j * feature + curr_feature] = nextfeat[j * mybatch + i];
curr_feature++;
}
}
}
mybatch = feature;
FEATPREC *tempfeat = currfeat;
currfeat = nextfeat;
nextfeat = tempfeat;
printf("real count = %d\n", feature);
}
__device__ inline void __float4Timesfloat(float4 a, float b, float4& c) {
c.x += a.x * b;
c.y += a.y * b;
c.z += a.z * b;
c.w += a.w * b;
}
__device__ inline void __float4AddfloatReLU(float4 a, float b, float4& c) {
c.x = __ReLU(a.x + b);
c.y = __ReLU(a.y + b);
c.z = __ReLU(a.z + b);
c.w = __ReLU(a.w + b);
}
__device__ inline void __float2Timesfloat(float2 a, float b, float2& c) {
c.x += a.x * b;
c.y += a.y * b;
}
__device__ inline void __float2AddfloatReLU(float2 a, float b, float2& c) {
c.x = __ReLU(a.x + b);
c.y = __ReLU(a.y + b);
}
template <typename T>
__device__ __forceinline__ T Load(const T* address) {
return __ldg(address);
}
inline __host__ __device__ int4 operator*(int4 a, int b)
{
return make_int4(a.x * b, a.y * b, a.z * b, a.w * b);
}
__device__ __forceinline__ void FMA(float x1, float x2, float* out) {
out[0] += x1 * x2;
}
static __device__ __forceinline__ void FMA(float4 x2, float x1, float4 *out) {
out[0].x += x1 * x2.x;
out[0].y += x1 * x2.y;
out[0].z += x1 * x2.z;
out[0].w += x1 * x2.w;
}
__global__ void sputnik_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
const int col_index = blockIdx.x * 4 + threadIdx.y; // each block handles 4 columns; each thread handles one column
const int feat_index = blockIdx.y * 32; // each block handles 32 features
if(feat_index > mybatch) return;
const int row_offset = Load(wdispl + col_index); // start of this column's nonzeros
const int nnzs = Load(wdispl + col_index + 1) - row_offset; // always equals 32
//-------------
const int sparse_tile_size = 4 * 32 / 4; // 4 columns, 32 values each, packed 4 per vector
__shared__ int4 column_indices_tile_array[sparse_tile_size]; // stores the column indices that are needed
__shared__ float4 values_tile_array[sparse_tile_size]; // stores the values that are needed
float4* values_tile = values_tile_array + 32 / 4 * threadIdx.y; // start of the column this thread handles, 8 float4s per column
int4* column_indices_tile = column_indices_tile_array + 32 / 4 * threadIdx.y;
float4* sparse_values_p = reinterpret_cast<float4*>(wvalue + row_offset) + threadIdx.x; // per-thread source pointer for the load (original data location)
int4* sparse_indexs_p = reinterpret_cast<int4*>(windex + row_offset) + threadIdx.x;
float4 *sparse_values_pp = sparse_values_p;
int4* sparse_indexs_pp = sparse_indexs_p;
float4* values_tile_p = values_tile + threadIdx.x; // per-thread destination pointer for the load (tile location)
int4* column_indices_tile_p = column_indices_tile + threadIdx.x;
//-------------
const int dense_tile_size = 32 * 32 / 8 / 4; // each block covers 32 positions of the feature tile
__align__(16) float4 dense_matrix_tile_array[dense_tile_size]; // each thread handles 4 features
float4* dense_tile_array = reinterpret_cast<float4*>(dense_matrix_tile_array); // alias pointer
float4* dense_value_p = reinterpret_cast<float4*>(currfeat + feat_index) + threadIdx.x; // start of the source data
//-------------
const int output_tile_size = 32 / 8; // each thread produces 4 results
__align__(16) float output_matrix_tile_array[output_tile_size] = {0};
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
*(values_tile_p) = Load(sparse_values_pp);
*(column_indices_tile_p) = Load(sparse_indexs_pp) * (mybatch / 4);
sparse_values_pp += blockDim.x;
sparse_indexs_pp += blockDim.x;
values_tile_p += blockDim.x;
column_indices_tile_p += blockDim.x;
}
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / 4; ++i) { // 32 entries are loaded per feature
int* col_offset = reinterpret_cast<int*>(column_indices_tile + i);
for(int k = 0; k < 4; ++k) { // load the dense data matching the sparse positions
int offset = col_offset[k];
float4* dense_value_pp = dense_value_p + offset; // offset skips that many features
for(int j = 0; j < 32 / blockDim.x / 4; ++j) {
int off = (i * 4 + k) * (32 / blockDim.x / 4) + j;
dense_tile_array[off] = Load(dense_value_pp); // each entry loads 4 contiguously stored features
dense_value_pp += blockDim.x;
}
}
}
float* sparse_value = reinterpret_cast<float*>(values_tile); // start the MAC loop over the column to be processed
for(int i = 0; i < 32; ++i) { // 32 elements per column
float* dense_value = reinterpret_cast<float*>(dense_tile_array + i); // bug here!!! simplistic version
#pragma unroll
for(int k = 0; k < 4; ++k) { // each element is combined with 4 features
#pragma unroll
for(int j = 0; j < 32 / blockDim.x / 4; ++j) {
float* outputs = output_matrix_tile_array + j * 4 + k; // maybe bug here!!!
FMA(dense_value[k], sparse_value[i], outputs);
}
}
}
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
for(int j = 0; j < 4; ++j) {
if(nextfeat[col_index * mybatch + feat_index + threadIdx.x * 4 + j] = __ReLU(output_matrix_tile_array[i * 4 + j] + bias)) {
active[feat_index + threadIdx.x * 4 + j] = 1; // bug here!!!
}
}
}
}
__global__ void sputnik_kernel2(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
const int col_index = blockIdx.x * 32 + threadIdx.y; // each block handles 32 columns; each thread handles one column
const int row_offset = Load(wdispl + col_index); // start of this column's nonzeros
const int nnzs = Load(wdispl + col_index + 1) - row_offset; // always equals 32
//-------------
const int sparse_tile_size = 32 * 32; // 32 columns, 32 values each
__shared__ int column_indices_tile_array[sparse_tile_size]; // stores the column indices that are needed
__shared__ float values_tile_array[sparse_tile_size]; // stores the values that are needed
float* values_tile = values_tile_array + 32 * threadIdx.y; // start of the column this thread handles
int* column_indices_tile = column_indices_tile_array + 32 * threadIdx.y;
float4* sparse_values_p = reinterpret_cast<float4*>(wvalue + row_offset) + threadIdx.x; // per-thread source pointer for the load (original data location)
int4* sparse_indexs_p = reinterpret_cast<int4*>(windex + row_offset) + threadIdx.x;
float4 *sparse_values_pp = sparse_values_p;
int4* sparse_indexs_pp = sparse_indexs_p;
float4* values_tile_p = reinterpret_cast<float4*>(values_tile) + threadIdx.x; // per-thread destination pointer for the load (tile location)
int4* column_indices_tile_p = reinterpret_cast<int4*>(column_indices_tile) + threadIdx.x;
//-------------
const int dense_tile_size = 32 * 32 / 8; // each block covers 32 positions of the feature tile
__align__(16) float dense_matrix_tile_array[dense_tile_size]; // each thread handles 4 features
float4* dense_tile_array = reinterpret_cast<float4*>(dense_matrix_tile_array); // alias pointer
//-------------
const int output_tile_size = 32 / 8; // each thread produces 4 results
__align__(16) float output_matrix_tile_array[output_tile_size] = {0};
__syncthreads();
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
*(values_tile_p) = Load(sparse_values_pp);
*(column_indices_tile_p) = Load(sparse_indexs_pp) * (mybatch / 4);
sparse_values_pp += blockDim.x;
sparse_indexs_pp += blockDim.x;
values_tile_p += blockDim.x;
column_indices_tile_p += blockDim.x;
}
#pragma unroll
for(int f = 0; f < 2; ++f) {
int feat_index = blockIdx.y * 32 * 2 + f * 32;
if(feat_index > mybatch) return;
__syncthreads();
float4* dense_value_p = reinterpret_cast<float4*>(currfeat + feat_index) + threadIdx.x; // start of the source data
#pragma unroll
for(int i = 0; i < 32; ++i) { // 32 entries are loaded per feature
int* col_offset = reinterpret_cast<int*>(column_indices_tile + i);
#pragma unroll
for(int k = 0; k < 1; ++k) { // load the dense data matching the sparse positions
float4* dense_value_pp = reinterpret_cast<float4*>(dense_value_p + col_offset[k]); // offset skips that many features
#pragma unroll
for(int j = 0; j < 1; ++j) {
int off = (i * 1 * 1) + k * 1 + j;
dense_tile_array[off] = Load(dense_value_pp); // each entry loads 4 contiguously stored features
dense_value_pp += blockDim.x;
}
}
}
float* sparse_value = reinterpret_cast<float*>(values_tile); // start the MAC loop over the column to be processed
float4* dense_value = reinterpret_cast<float4*>(dense_tile_array); // bug here!!! simplistic version
#pragma unroll
for(int i = 0; i < 32; ++i) { // 32 elements per column
float* lhs_values = (sparse_value + i);
#pragma unroll
for(int k = 0; k < 1; ++k) { // each element is combined with 4 features
#pragma unroll
for(int j = 0; j < 1; ++j) {
float4* outputs = reinterpret_cast<float4*>(output_matrix_tile_array + j * 4 * 1); // maybe bug here!!!
int rhs_offset = j * 1 * 1 + k * 1 + i;
FMA(dense_value[rhs_offset], lhs_values[k], outputs);
}
}
}
#pragma unroll
for(int i = 0; i < 32 / blockDim.x / 4; ++i) {
#pragma unroll
for(int j = 0; j < 4; ++j) {
if(
nextfeat[col_index * mybatch + feat_index + threadIdx.x * 4 + j] = __ReLU(output_matrix_tile_array[i * 4 + j] + bias)
){
active[feat_index + threadIdx.x * 4 + j] = 1; // bug here!!!
}
output_matrix_tile_array[i * 4 + j] = 0;
}
}
}
}
__global__ void dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int *wdispl, INDPREC *windex, VALPREC *wvalue, float bias, int *active, int mybatch){
extern __shared__ float shared[];
const int weight_block_size = blockDim.x / 32;
const int batch_per_round = 32 * 4;
const int weight_block = blockIdx.x;
const int current_block_index_begin = wdispl[weight_block * weight_block_size];
const int current_block_index_end = wdispl[(weight_block + 1) * (weight_block_size)];
const int current_block_index_size = current_block_index_end - current_block_index_begin;
int* shared_index = (int*)(shared + 32 * weight_block_size);
for(int idx = threadIdx.x; idx < current_block_index_size; idx += blockDim.x) {
shared[idx] = wvalue[current_block_index_begin + idx];
shared_index[idx] = windex[current_block_index_begin + idx];
}
__syncthreads();
int col = threadIdx.x / 32 + blockIdx.x * weight_block_size;
int thread_idx = threadIdx.x % 32;
for(int batch_idx = blockIdx.y * mybatch / (batch_per_round) / gridDim.y;
batch_idx < (blockIdx.y + 1) * (mybatch + batch_per_round - 1) / (batch_per_round) / gridDim.y;
batch_idx += 1) { // unroll this loop
float4 reg[1] = {0};
for(int idx = wdispl[col]; idx < wdispl[col + 1]; idx += 4) { // entries with the same idx update the same region
float4 weight_reg = reinterpret_cast<float4*>(shared)[(idx - current_block_index_begin) / 4];
int4 weight_idx_reg = reinterpret_cast<int4*>(shared_index)[(idx - current_block_index_begin) / 4];
if((batch_idx + 0) * batch_per_round + thread_idx * 4 + 1 < mybatch) {
for(int unroll = 0; unroll < 1; ++unroll) {
int feature_id = (batch_idx + unroll) * batch_per_round;
auto feat_p0 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.x * mybatch + feature_id);
auto feat_p1 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.y * mybatch + feature_id);
auto feat_p2 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.z * mybatch + feature_id);
auto feat_p3 = reinterpret_cast<float4*>(currfeat
+ weight_idx_reg.w * mybatch + feature_id);
//__float4Timesfloat(feat_p[thread_idx], shared[idx - current_block_index_begin], reg);
__float4Timesfloat(feat_p0[thread_idx], weight_reg.x, reg[unroll]);
__float4Timesfloat(feat_p1[thread_idx], weight_reg.y, reg[unroll]);
__float4Timesfloat(feat_p2[thread_idx], weight_reg.z, reg[unroll]);
__float4Timesfloat(feat_p3[thread_idx], weight_reg.w, reg[unroll]);
}
}
}
// write back the results for this batch segment
if((batch_idx + 0) * batch_per_round + thread_idx * 4 + 1 < mybatch) {
for(int unroll = 0; unroll < 1; ++unroll) {
int feature_id = (batch_idx + unroll) * batch_per_round;
auto feat_p = reinterpret_cast<float4*>(nextfeat + mybatch * col + feature_id);
__float4AddfloatReLU(reg[unroll], bias, feat_p[thread_idx]);
if(feat_p[thread_idx].x) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4] = 1;
}
if(feat_p[thread_idx].y) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 1] = 1;
}
if(feat_p[thread_idx].z) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 2] = 1;
}
if(feat_p[thread_idx].w) {
active[(batch_idx + unroll) * batch_per_round + thread_idx * 4 + 3] = 1;
}
}
}
}
};
void setup_gpu(){
mybatch = batch;
cudaSetDevice(myid % 2); // two GPUs per node
cudaFuncSetAttribute(dummy_kernel,cudaFuncAttributeMaxDynamicSharedMemorySize,98304);
OR_FATAL(cudaEventCreate(&kernelstart));
OR_FATAL(cudaEventCreate(&kernelstop));
OR_FATAL(cudaEventCreate(©start));
OR_FATAL(cudaEventCreate(©stop));
OR_FATAL(cudaStreamCreate(©stream));
OR_FATAL(cudaStreamCreate(&kernelstream));
OR_FATAL(cudaMallocHost((void**)&active,sizeof(int)*mybatch));
OR_FATAL(cudaMalloc((void**)&active_d,sizeof(int)*mybatch));
for(int k = 0; k < mybatch; k++){
active[k] = neuron;
}
OR_FATAL(cudaMemset(active_d,0,sizeof(int)*mybatch));
csrdispl_d = new int*[layer];
csrindex_d = new INDPREC*[layer];
csrvalue_d = new VALPREC*[layer];
for(int l = 0; l < layer; l++){
OR_FATAL(cudaMalloc((void**)&csrdispl_d[l], sizeof(int) * (neuron+1)));
OR_FATAL(cudaMemcpy(csrdispl_d[l], csrdispl[l], sizeof(int) * (neuron+1), cudaMemcpyHostToDevice));
OR_FATAL(cudaMalloc((void**)&csrindex_d[l],sizeof(INDPREC) * csrdispl[l][neuron]));
OR_FATAL(cudaMalloc((void**)&csrvalue_d[l],sizeof(VALPREC) * csrdispl[l][neuron]));
OR_FATAL(cudaMemcpy(csrindex_d[l], csrindex[l], sizeof(INDPREC) * csrdispl[l][neuron], cudaMemcpyHostToDevice));
OR_FATAL(cudaMemcpy(csrvalue_d[l], csrvalue[l], sizeof(VALPREC) * csrdispl[l][neuron], cudaMemcpyHostToDevice));
}
OR_FATAL(cudaMalloc((void**)&indbuff_d,sizeof(INDPREC)* neuron * 32));
OR_FATAL(cudaMalloc((void**)&valbuff_d,sizeof(VALPREC)* neuron * 32));
OR_FATAL(cudaMalloc((void**)&currfeat_d, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(cudaMalloc((void**)&nextfeat_d, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(cudaMemset(currfeat_d, 0, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(cudaMemset(nextfeat_d, 0, sizeof(FEATPREC) * mybatch * neuron));
OR_FATAL(cudaMemcpy(currfeat_d, currfeat, sizeof(FEATPREC) * mybatch * neuron, cudaMemcpyHostToDevice));
}
void infer_gpu(int l){
int weight_block_size = 1;
int batch_per_round = 32;
int batch_block_size = mybatch / (batch_per_round * 4) + 1;
int shared_memory_size = 32 * weight_block_size * sizeof(float);
dim3 block(8, 32);
dim3 grid(neuron / 32, (mybatch / (32 * 2) + 1));
indbuff_d = csrindex_d[l];
valbuff_d = csrvalue_d[l];
auto startkernel = sc::now();
OR_FATAL(cudaMemsetAsync(active_d, 0, sizeof(int) * mybatch, kernelstream));
OR_FATAL(cudaEventRecord(kernelstart,kernelstream));
sputnik_kernel2<<<grid, block, 0, kernelstream>>>(nextfeat_d, currfeat_d, csrdispl_d[l], indbuff_d, valbuff_d,bias, active_d, mybatch);
OR_FATAL(cudaEventRecord(kernelstop,kernelstream));
OR_FATAL(cudaMemcpyAsync(active, active_d, sizeof(int) * mybatch, cudaMemcpyDeviceToHost, kernelstream));
OR_FATAL(cudaStreamSynchronize(kernelstream));
timekernel += sc::now() - startkernel;
int feature = 0;
// for(int i = 0; i < mybatch; ++i) {
// active[i] = 0;
// }
// OR_FATAL(cudaMemcpy(nextfeat, nextfeat_d, neuron * mybatch * sizeof(float), cudaMemcpyDeviceToHost));
// for(int i = 0; i < mybatch; ++i) {
// for(int j = 0; j < neuron; ++j) {
// if(nextfeat[j * mybatch + i]) {
// active[i] = 1;
// }
// }
// }
for(int i = 0; i < mybatch; ++i) {
if(active[i]) {
feature ++;
}
}
int alignment_bias = (feature % 4 == 0) ? 0 : (4 - feature % 4);
feature += alignment_bias;
if(feature != mybatch) {
OR_FATAL(cudaMemcpy(nextfeat, nextfeat_d, neuron * mybatch * sizeof(float), cudaMemcpyDeviceToHost));
for(int j = 0; j < neuron; ++j) {
int curr_feature = 0;
for(int i = 0; i < mybatch + alignment_bias; ++i) {
if(i < mybatch && active[i]) {
nextfeat_tmp[j * feature + curr_feature] = nextfeat[j * mybatch + i];
curr_feature++;
}
if(i >= mybatch) {
nextfeat_tmp[j * feature + curr_feature] = 0;
curr_feature++;
}
}
}
OR_FATAL(cudaMemcpy(nextfeat_d, nextfeat_tmp, neuron * feature * sizeof(float), cudaMemcpyHostToDevice));
}
mybatch = feature;
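// Ping-pong the feature buffers: the output of this layer becomes the input of the next.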
FEATPREC *tempfeat_d = currfeat_d;
currfeat_d = nextfeat_d;
nextfeat_d = tempfeat_d;
printf("real count = %d\n", feature);
}; |
39f983b82f8b5c6ae7f1106cf6ec745d6727d981.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgeaxpy.cu normal z -> d, Tue Feb 9 16:05:40 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
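// Note: each thread handles one matrix row and loops over the columns; elements are
// addressed column-major (idx = row + j*num_rows), matching MAGMA's dense storage.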
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_d_matrix format.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
| 39f983b82f8b5c6ae7f1106cf6ec745d6727d981.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgeaxpy.cu normal z -> d, Tue Feb 9 16:05:40 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_d_matrix format.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
dgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
|
c5454b1c1ea36d0ec828e7c768c3fea77818bbed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// #include <complex.h>
#include <hip/hip_complex.h>
#include <sys/time.h>
#include <mpi.h>
// #include <hip/hip_runtime.h>
// #include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "dslash_base.h"
#include "lib_vec.h"
#include "qcd.h"
#include "qcd_mult.h"
#define MYRAND_MAX 32767
static unsigned long myrand_next;
static int myrand(void)
{
myrand_next = myrand_next * 1103515245 + 12345;
return((unsigned)(myrand_next/65536) % 32768);
}
static void mysrand(unsigned seed)
{
myrand_next = seed;
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
// int i;
// i = gettimeofday(&tp,&tzp);
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
extern int qcdNx;
extern int qcdNy;
extern int qcdNz;
extern int qcdNt;
extern int qcdNxy;
extern int qcdNxyz;
extern int qcdNsite;
extern int qcdMyRank;
extern int qcdNProcs;
extern int qcdNetSize[4];
extern int qcdNetPos[4];
extern int qcdRankNeighbors[8];
extern int qcdSx;
extern int qcdSy;
extern int qcdSz;
extern int qcdSt;
#define QCD_NX 8
#define QCD_NY 8
#define QCD_NZ 8
#define QCD_NT 16
#define QCD_ENORM 1.0e-16
#define QCD_NITER 200
#define QCD_CKS 0.150
void set_src(int ids,int ics,QCDComplex* Bsrc,int NtimeS)
{
int NTsrc,NPEsrc;
// int ist,is,ie;
int ist;
NTsrc = NtimeS % qcdNt;
NPEsrc = (qcdNetSize[0] * qcdNetSize[1] * qcdNetSize[2]) * (NtimeS / qcdNt);
QCDLA_SetConst(Bsrc,0.0,qcdNsite);
ist = NTsrc * qcdNx * qcdNy * qcdNz;
if(qcdMyRank == NPEsrc){
#ifdef QCD_SPINOR_3x4
// 3x4
Bsrc[ist].v[ids*QCD_NCOL + ics] = 1.0;
#else
// 4x3
// Bsrc[ist].v[ics*QCD_ND + ids] = 1.0;
// Bsrc[ist].v[ics*QCD_ND + ids] = make_cuDoubleComplex(1.0, 0);
Bsrc[ist + (ics*QCD_ND + ids) * qcdNsite] = make_cuDoubleComplex(1.0, 0);
#endif
}
}
void uinit(double* pU,int lx,int ly,int lz,int lt)
{
int i,j,x,y,z,t,is,d;
int sx,sy,sz,st;
int ex,ey,ez,et;
QCDReal dt;
sx = ((qcdNetPos[0]) * (lx)) / (qcdNetSize[0]);
ex = ((qcdNetPos[0] + 1) * (lx)) / (qcdNetSize[0]);
sy = ((qcdNetPos[1]) * (ly)) / (qcdNetSize[1]);
ey = ((qcdNetPos[1] + 1) * (ly)) / (qcdNetSize[1]);
sz = ((qcdNetPos[2]) * (lz)) / (qcdNetSize[2]);
ez = ((qcdNetPos[2] + 1) * (lz)) / (qcdNetSize[2]);
st = ((qcdNetPos[3]) * (lt)) / (qcdNetSize[3]);
et = ((qcdNetPos[3] + 1) * (lt)) / (qcdNetSize[3]);
mysrand(100);
/* if(lx == 8 && ly == 8 && lz == 8 && lt == 16){ */
/* FILE* pFile; */
/* pFile = fopen("conf_08080816.txt","r"); */
/* for(t=0;t<lt;t++){ */
/* for(z=0;z<lz;z++){ */
/* for(y=0;y<ly;y++){ */
/* for(x=0;x<lx;x++){ */
/* if((x >= sx && x < ex) && (y >= sy && y < ey) && (z >= sz && z < ez) && (t >= st && t < et)){ */
/* for(i=0;i<4;i++){ */
/* is = 18*(i*qcdNsite + (x-sx) + (y-sy)*qcdNx + (z-sz)*qcdNxy + (t-st)*qcdNxyz); */
/* for(j=0;j<18;j++){ */
/* fscanf(pFile,"%lf",&pU[is + j]); */
/* } */
/* } */
/* } */
/* else{ */
/* for(i=0;i<4*18;i++){ */
/* fscanf(pFile,"%lf",&d); */
/* } */
/* //fseek(pFile,4*18*8,SEEK_CUR); */
/* } */
/* } */
/* } */
/* } */
/* } */
/* fclose(pFile); */
/* } */
/* else{ */
d = 0;
is = 0;
for(i=0;i<4;i++){
for(t=0;t<lt;t++){
for(z=0;z<lz;z++){
for(y=0;y<ly;y++){
for(x=0;x<lx;x++){
if((x >= sx && x < ex) && (y >= sy && y < ey) && (z >= sz && z < ez) && (t >= st && t < et)){
is = (x - sx) + (y - sy)*qcdNx + (z - sz)*qcdNxy + (t - st)*qcdNxyz + i*qcdNsite;
for(j=0;j<9;j++){
dt = 2.0*(QCDReal)myrand()/(QCDReal)MYRAND_MAX;
// pU[is++] = dt - 1.0;
pU[is * 2] = dt - 1.0;
dt = 2.0*(QCDReal)myrand()/(QCDReal)MYRAND_MAX;
// pU[is++] = dt - 1.0;
pU[is * 2 + 1] = dt - 1.0;
is += 4*qcdNsite;
}
}
else{
for(j=0;j<9;j++){
d += myrand();
d += myrand();
}
}
}
}
}
}
}
mysrand(d);
/* } */
}
void cuSolve_CG(QCDComplex* dpXq,QCDComplex* dpU,QCDComplex* dpB,double CKs,double enorm,int* pNconv,double* pDiff)
{
// static QCDSpinor* dpX = NULL;
// static QCDSpinor* dpS = NULL;
// static QCDSpinor* dpR = NULL;
// static QCDSpinor* dpP = NULL;
// static QCDSpinor* dpT = NULL;
static QCDComplex* dpX = NULL;
static QCDComplex* dpS = NULL;
static QCDComplex* dpR = NULL;
static QCDComplex* dpP = NULL;
static QCDComplex* dpT = NULL;
int iter,niter = 500;
double snorm,sr,ret;
double rr,rrp;
double cr, bk, pap;
int nconv = -1;
dim3 threads(128,1,1);
dim3 blocks(qcdNsite/threads.x,1,1);
static double* dret = NULL;
if(dpX == NULL){
checkCudaErrors(hipMalloc((void**)&dret, sizeof(double)));
checkCudaErrors(hipMalloc((void**)&dpX, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(hipMalloc((void**)&dpS, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(hipMalloc((void**)&dpR, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(hipMalloc((void**)&dpP, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(hipMalloc((void**)&dpT, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
cuQCDLA_Init(qcdNsite);
}
hipLaunchKernelGGL(( cuQCDLA_Equate), dim3(blocks), dim3(threads), 0, 0, dpS,dpB,qcdNsite);
cuQCDLA_Norm(dret, (double*)dpS,qcdNsite);
checkCudaErrors(hipMemcpy(&ret, dret, sizeof(double), hipMemcpyDeviceToHost));
MPI_Allreduce(&ret,&sr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
snorm = 1.0 / sr;
//init
hipLaunchKernelGGL(( cuQCDLA_Equate), dim3(blocks), dim3(threads), 0, 0, dpR,dpS,qcdNsite);
hipLaunchKernelGGL(( cuQCDLA_Equate), dim3(blocks), dim3(threads), 0, 0, dpX,dpS,qcdNsite);
cuQCDDopr_DdagD(dpS,dpU,dpX,dpT,CKs);
hipLaunchKernelGGL(( cuQCDLA_MultAddScalar), dim3(blocks), dim3(threads), 0, 0, dpR,dpS,-1.0,qcdNsite);
hipLaunchKernelGGL(( cuQCDLA_Equate), dim3(blocks), dim3(threads), 0, 0, dpP,dpR,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(hipMemcpy(&ret, dret, sizeof(double), hipMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
rrp = rr;
// printf("%f\n", rr);
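// Standard conjugate-gradient iteration: cr = (r,r)/(p,Ap); x += cr*p; r -= cr*Ap;
// bk = (r_new,r_new)/(r_old,r_old); p = r + bk*p. Each dot product is computed
// locally on the GPU and combined across ranks with MPI_Allreduce.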
for(iter=0; iter < niter; iter++){
cuQCDDopr_DdagD(dpS,dpU,dpP,dpT,CKs);
cuQCDLA_DotProd(dret,(double*)dpS,(double*)dpP,qcdNsite);
checkCudaErrors(hipMemcpy(&ret, dret, sizeof(double), hipMemcpyDeviceToHost));
MPI_Allreduce(&ret,&pap,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
cr = rrp/pap;
hipLaunchKernelGGL(( cuQCDLA_MultAddScalar), dim3(blocks), dim3(threads), 0, 0, dpX,dpP,cr,qcdNsite);
hipLaunchKernelGGL(( cuQCDLA_MultAddScalar), dim3(blocks), dim3(threads), 0, 0, dpR,dpS,-cr,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(hipMemcpy(&ret, dret, sizeof(double), hipMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
bk = rr/rrp;
hipLaunchKernelGGL(( cuQCDLA_MultScalar), dim3(blocks), dim3(threads), 0, 0, dpP,dpP,bk,qcdNsite);
hipLaunchKernelGGL(( cuQCDLA_MultAddScalar), dim3(blocks), dim3(threads), 0, 0, dpP,dpR,1.0,qcdNsite);
rrp = rr;
// printf("%f\n", rr*snorm);
if(rr*snorm < enorm){
nconv = iter;
break;
}
}
if(nconv == -1 && qcdMyRank == 0){
printf(" not converged\n");
}
hipLaunchKernelGGL(( cuQCDLA_Equate), dim3(blocks), dim3(threads), 0, 0, dpXq,dpX,qcdNsite);
cuQCDDopr_DdagD(dpR,dpU,dpX,dpT,CKs);
hipLaunchKernelGGL(( cuQCDLA_MultAddScalar), dim3(blocks), dim3(threads), 0, 0, dpR,dpB,-1.0,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(hipMemcpy(&ret, dret, sizeof(double), hipMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
*pDiff = rr;
*pNconv = nconv;
}
int main(int argc,char** argv)
{
int myrank=0,nprocs=1;
int ngpus=1;
int latsize[4],localsize[4];
// int netSize[16],netPos[16],netDim;
int netSize[16],netPos[16];
int i,j,t,npIn,nsite;
// int Niter = QCD_NITER;
QCDComplex* pSrc;
QCDComplex* pDest;
QCDComplex* pGauge;
QCDComplex* dpSrc;
QCDComplex* dpDest;
QCDComplex* dpGauge;
QCDReal Enorm = QCD_ENORM;
QCDReal Cks = QCD_CKS;
QCDReal* pCorr;
double tstart,tend,ttotal;
// char* pStr;
// int ItimeS,NtimeS,ics,ids,is,ie,ipet,it,Nconv,cnt;
// int ics,ids,is,ipet,it,Nconv;
int ics,ids,is,ipet,it,Nconv,iv;
// double CorrF,Diff,rr;
double CorrF,Diff,rr,tCorrF;
// unsigned long flops;
// double tt;
latsize[0] = 0;
latsize[1] = 0;
latsize[2] = 0;
latsize[3] = 0;
// netDim = 4;
netSize[0] = 0;
netSize[1] = 0;
netSize[2] = 0;
netSize[3] = 0;
for(i=1;i<argc;i++){
if(argv[i][0] == 'L'){
t = 0;
for(j=1;j<strlen(argv[i]);j++){
if(argv[i][j] == 'x'){
t++;
}
else if(argv[i][j] >= '0' && argv[i][j] <= '9'){
latsize[t] = 10*latsize[t] + (int)(argv[i][j] - '0');
}
}
}
else if(argv[i][0] == 'P'){
t = 0;
for(j=1;j<strlen(argv[i]);j++){
if(argv[i][j] == 'x'){
t++;
}
else if(argv[i][j] >= '0' && argv[i][j] <= '9'){
netSize[t] = 10*netSize[t] + (int)(argv[i][j] - '0');
}
}
}
else if(argv[i][0] == 'G'){
ngpus = (int)(argv[i][1] - '0');
}
}
t = 0;
for(i=0;i<4;i++){
if(latsize[0] == 0){
t++;
}
}
if(t > 0){
latsize[0] = QCD_NX;
latsize[1] = QCD_NY;
latsize[2] = QCD_NZ;
latsize[3] = QCD_NT;
}
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
checkCudaErrors(hipSetDevice(myrank % ngpus));
npIn = 1;
for(i=0;i<4;i++){
npIn *= netSize[i];
//debug
/* printf("netSize[%d] == %d\n", i, netSize[i]); */
}
if(npIn != nprocs){
if(myrank == 0){
printf("Number of processes is invalid\n");
}
return 0;
}
nsite = 1;
for(i=0;i<4;i++){
localsize[i] = latsize[i] / netSize[i];
nsite *= localsize[i];
}
t = myrank;
for(i=0;i<4;i++){
netPos[i] = t % netSize[i];
t /= netSize[i];
}
QCDDopr_Init(localsize[0],localsize[1],localsize[2],localsize[3],netSize[0],netSize[1],netSize[2],netSize[3],myrank);
if(myrank == 0){
printf("=============================================\n");
printf("QCD base MPI program\n");
printf(" Lattice size = %dx%dx%dx%d\n",latsize[0],latsize[1],latsize[2],latsize[3]);
printf("Decomposed by %d procs : %dx%dx%dx%d\n",nprocs,netSize[0],netSize[1],netSize[2],netSize[3]);
printf(" Local Lattice size = %dx%dx%dx%d\n",localsize[0],localsize[1],localsize[2],localsize[3]);
printf("\n Cks = %f\n",Cks);
printf("=============================================\n");
}
pGauge = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512);
uinit((QCDReal*)pGauge,latsize[0],latsize[1],latsize[2],latsize[3]);
checkCudaErrors(hipMalloc((void**)&dpGauge, sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512));
checkCudaErrors(hipMemcpy(dpGauge, pGauge, sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512, hipMemcpyHostToDevice));
pSrc = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128);
pDest = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128);
checkCudaErrors(hipMalloc((void**)&dpSrc, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128));
checkCudaErrors(hipMalloc((void**)&dpDest, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128));
pCorr = (QCDReal*)malloc(sizeof(QCDReal) * latsize[3]);
for(i=0;i<latsize[3];i++){
pCorr[i] = 0.0;
}
ttotal = 0.0;
/* for(ics=0;ics<QCD_NCOL;ics++){ */
/* for(ids=0;ids<QCD_ND;ids++){ */
for(ics=0;ics<1;ics++){
for(ids=0;ids<1;ids++){
set_src(ids,ics,pSrc,0);
checkCudaErrors(hipMemcpy(dpSrc, pSrc, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128, hipMemcpyHostToDevice));
MPI_Barrier(MPI_COMM_WORLD);
tstart = mysecond();
// Solve_CG(pDest,pGauge,pSrc,Cks,Enorm,&Nconv,&Diff);
cuSolve_CG(dpDest,dpGauge,dpSrc,Cks,Enorm,&Nconv,&Diff);
MPI_Barrier(MPI_COMM_WORLD);
tend = mysecond() - tstart;
ttotal += tend;
checkCudaErrors(hipMemcpy(pDest, dpDest, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128, hipMemcpyDeviceToHost));
if(myrank == 0){
printf(" %3d %3d %6d %12.4e ... %f sec\n", ics, ids, Nconv, Diff,tend);
}
for(i=0;i<latsize[3];i++){
ipet = i/localsize[3];
it = i % localsize[3];
if(ipet == netPos[3]){
is = it*localsize[0]*localsize[1]*localsize[2];
// QCDLA_Norm(&CorrF,(double*)(pDest + is),localsize[0]*localsize[1]*localsize[2]);
CorrF = 0.0;
for (iv = 0; iv < QCD_SPINOR_SIZE; iv++) {
QCDLA_Norm_Simple(&tCorrF,(double*)(pDest + is + iv * qcdNsite), 2*localsize[0]*localsize[1]*localsize[2]);
CorrF += tCorrF;
}
}
else{
CorrF = 0.0;
}
MPI_Allreduce(&CorrF,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
pCorr[i] = pCorr[i] + rr;
}
}
}
if(myrank == 0){
printf("\nPs meson correlator:\n");
for(i=0;i<latsize[3];i++){
printf("%d: %0.8E\n",i,pCorr[i]);
}
printf("\n Avg. Solver Time = %f [sec]\n",ttotal / 12);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
//debug
/* printf("finish\n"); */
return 0;
}
| c5454b1c1ea36d0ec828e7c768c3fea77818bbed.cu | #include <stdio.h>
#include <stdlib.h>
// #include <complex.h>
#include <cuComplex.h>
#include <sys/time.h>
#include <mpi.h>
// #include <cuda.h>
// #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "dslash_base.h"
#include "lib_vec.h"
#include "qcd.h"
#include "qcd_mult.h"
#define MYRAND_MAX 32767
static unsigned long myrand_next;
static int myrand(void)
{
myrand_next = myrand_next * 1103515245 + 12345;
return((unsigned)(myrand_next/65536) % 32768);
}
static void mysrand(unsigned seed)
{
myrand_next = seed;
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
// int i;
// i = gettimeofday(&tp,&tzp);
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
extern int qcdNx;
extern int qcdNy;
extern int qcdNz;
extern int qcdNt;
extern int qcdNxy;
extern int qcdNxyz;
extern int qcdNsite;
extern int qcdMyRank;
extern int qcdNProcs;
extern int qcdNetSize[4];
extern int qcdNetPos[4];
extern int qcdRankNeighbors[8];
extern int qcdSx;
extern int qcdSy;
extern int qcdSz;
extern int qcdSt;
#define QCD_NX 8
#define QCD_NY 8
#define QCD_NZ 8
#define QCD_NT 16
#define QCD_ENORM 1.0e-16
#define QCD_NITER 200
#define QCD_CKS 0.150
void set_src(int ids,int ics,QCDComplex* Bsrc,int NtimeS)
{
int NTsrc,NPEsrc;
// int ist,is,ie;
int ist;
NTsrc = NtimeS % qcdNt;
NPEsrc = (qcdNetSize[0] * qcdNetSize[1] * qcdNetSize[2]) * (NtimeS / qcdNt);
QCDLA_SetConst(Bsrc,0.0,qcdNsite);
ist = NTsrc * qcdNx * qcdNy * qcdNz;
if(qcdMyRank == NPEsrc){
#ifdef QCD_SPINOR_3x4
// 3x4
Bsrc[ist].v[ids*QCD_NCOL + ics] = 1.0;
#else
// 4x3
// Bsrc[ist].v[ics*QCD_ND + ids] = 1.0;
// Bsrc[ist].v[ics*QCD_ND + ids] = make_cuDoubleComplex(1.0, 0);
Bsrc[ist + (ics*QCD_ND + ids) * qcdNsite] = make_cuDoubleComplex(1.0, 0);
#endif
}
}
void uinit(double* pU,int lx,int ly,int lz,int lt)
{
int i,j,x,y,z,t,is,d;
int sx,sy,sz,st;
int ex,ey,ez,et;
QCDReal dt;
sx = ((qcdNetPos[0]) * (lx)) / (qcdNetSize[0]);
ex = ((qcdNetPos[0] + 1) * (lx)) / (qcdNetSize[0]);
sy = ((qcdNetPos[1]) * (ly)) / (qcdNetSize[1]);
ey = ((qcdNetPos[1] + 1) * (ly)) / (qcdNetSize[1]);
sz = ((qcdNetPos[2]) * (lz)) / (qcdNetSize[2]);
ez = ((qcdNetPos[2] + 1) * (lz)) / (qcdNetSize[2]);
st = ((qcdNetPos[3]) * (lt)) / (qcdNetSize[3]);
et = ((qcdNetPos[3] + 1) * (lt)) / (qcdNetSize[3]);
mysrand(100);
/* if(lx == 8 && ly == 8 && lz == 8 && lt == 16){ */
/* FILE* pFile; */
/* pFile = fopen("conf_08080816.txt","r"); */
/* for(t=0;t<lt;t++){ */
/* for(z=0;z<lz;z++){ */
/* for(y=0;y<ly;y++){ */
/* for(x=0;x<lx;x++){ */
/* if((x >= sx && x < ex) && (y >= sy && y < ey) && (z >= sz && z < ez) && (t >= st && t < et)){ */
/* for(i=0;i<4;i++){ */
/* is = 18*(i*qcdNsite + (x-sx) + (y-sy)*qcdNx + (z-sz)*qcdNxy + (t-st)*qcdNxyz); */
/* for(j=0;j<18;j++){ */
/* fscanf(pFile,"%lf",&pU[is + j]); */
/* } */
/* } */
/* } */
/* else{ */
/* for(i=0;i<4*18;i++){ */
/* fscanf(pFile,"%lf",&d); */
/* } */
/* //fseek(pFile,4*18*8,SEEK_CUR); */
/* } */
/* } */
/* } */
/* } */
/* } */
/* fclose(pFile); */
/* } */
/* else{ */
d = 0;
is = 0;
for(i=0;i<4;i++){
for(t=0;t<lt;t++){
for(z=0;z<lz;z++){
for(y=0;y<ly;y++){
for(x=0;x<lx;x++){
if((x >= sx && x < ex) && (y >= sy && y < ey) && (z >= sz && z < ez) && (t >= st && t < et)){
is = (x - sx) + (y - sy)*qcdNx + (z - sz)*qcdNxy + (t - st)*qcdNxyz + i*qcdNsite;
for(j=0;j<9;j++){
dt = 2.0*(QCDReal)myrand()/(QCDReal)MYRAND_MAX;
// pU[is++] = dt - 1.0;
pU[is * 2] = dt - 1.0;
dt = 2.0*(QCDReal)myrand()/(QCDReal)MYRAND_MAX;
// pU[is++] = dt - 1.0;
pU[is * 2 + 1] = dt - 1.0;
is += 4*qcdNsite;
}
}
else{
for(j=0;j<9;j++){
d += myrand();
d += myrand();
}
}
}
}
}
}
}
mysrand(d);
/* } */
}
void cuSolve_CG(QCDComplex* dpXq,QCDComplex* dpU,QCDComplex* dpB,double CKs,double enorm,int* pNconv,double* pDiff)
{
// static QCDSpinor* dpX = NULL;
// static QCDSpinor* dpS = NULL;
// static QCDSpinor* dpR = NULL;
// static QCDSpinor* dpP = NULL;
// static QCDSpinor* dpT = NULL;
static QCDComplex* dpX = NULL;
static QCDComplex* dpS = NULL;
static QCDComplex* dpR = NULL;
static QCDComplex* dpP = NULL;
static QCDComplex* dpT = NULL;
int iter,niter = 500;
double snorm,sr,ret;
double rr,rrp;
double cr, bk, pap;
int nconv = -1;
dim3 threads(128,1,1);
dim3 blocks(qcdNsite/threads.x,1,1);
static double* dret = NULL;
if(dpX == NULL){
checkCudaErrors(cudaMalloc((void**)&dret, sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&dpX, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(cudaMalloc((void**)&dpS, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(cudaMalloc((void**)&dpR, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(cudaMalloc((void**)&dpP, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
checkCudaErrors(cudaMalloc((void**)&dpT, sizeof(QCDComplex)*QCD_SPINOR_SIZE*qcdNsite));
cuQCDLA_Init(qcdNsite);
}
cuQCDLA_Equate<<<blocks, threads>>>(dpS,dpB,qcdNsite);
cuQCDLA_Norm(dret, (double*)dpS,qcdNsite);
checkCudaErrors(cudaMemcpy(&ret, dret, sizeof(double), cudaMemcpyDeviceToHost));
MPI_Allreduce(&ret,&sr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
snorm = 1.0 / sr;
//init
cuQCDLA_Equate<<<blocks, threads>>>(dpR,dpS,qcdNsite);
cuQCDLA_Equate<<<blocks, threads>>>(dpX,dpS,qcdNsite);
cuQCDDopr_DdagD(dpS,dpU,dpX,dpT,CKs);
cuQCDLA_MultAddScalar<<<blocks, threads>>>(dpR,dpS,-1.0,qcdNsite);
cuQCDLA_Equate<<<blocks, threads>>>(dpP,dpR,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(cudaMemcpy(&ret, dret, sizeof(double), cudaMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
rrp = rr;
// printf("%f\n", rr);
for(iter=0; iter < niter; iter++){
cuQCDDopr_DdagD(dpS,dpU,dpP,dpT,CKs);
cuQCDLA_DotProd(dret,(double*)dpS,(double*)dpP,qcdNsite);
checkCudaErrors(cudaMemcpy(&ret, dret, sizeof(double), cudaMemcpyDeviceToHost));
MPI_Allreduce(&ret,&pap,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
cr = rrp/pap;
cuQCDLA_MultAddScalar<<<blocks, threads>>>(dpX,dpP,cr,qcdNsite);
cuQCDLA_MultAddScalar<<<blocks, threads>>>(dpR,dpS,-cr,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(cudaMemcpy(&ret, dret, sizeof(double), cudaMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
bk = rr/rrp;
cuQCDLA_MultScalar<<<blocks, threads>>>(dpP,dpP,bk,qcdNsite);
cuQCDLA_MultAddScalar<<<blocks, threads>>>(dpP,dpR,1.0,qcdNsite);
rrp = rr;
// printf("%f\n", rr*snorm);
if(rr*snorm < enorm){
nconv = iter;
break;
}
}
if(nconv == -1 && qcdMyRank == 0){
printf(" not converged\n");
}
cuQCDLA_Equate<<<blocks, threads>>>(dpXq,dpX,qcdNsite);
cuQCDDopr_DdagD(dpR,dpU,dpX,dpT,CKs);
cuQCDLA_MultAddScalar<<<blocks, threads>>>(dpR,dpB,-1.0,qcdNsite);
cuQCDLA_Norm(dret,(double*)dpR,qcdNsite);
checkCudaErrors(cudaMemcpy(&ret, dret, sizeof(double), cudaMemcpyDeviceToHost));
MPI_Allreduce(&ret,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
*pDiff = rr;
*pNconv = nconv;
}
int main(int argc,char** argv)
{
int myrank=0,nprocs=1;
int ngpus=1;
int latsize[4],localsize[4];
// int netSize[16],netPos[16],netDim;
int netSize[16],netPos[16];
int i,j,t,npIn,nsite;
// int Niter = QCD_NITER;
QCDComplex* pSrc;
QCDComplex* pDest;
QCDComplex* pGauge;
QCDComplex* dpSrc;
QCDComplex* dpDest;
QCDComplex* dpGauge;
QCDReal Enorm = QCD_ENORM;
QCDReal Cks = QCD_CKS;
QCDReal* pCorr;
double tstart,tend,ttotal;
// char* pStr;
// int ItimeS,NtimeS,ics,ids,is,ie,ipet,it,Nconv,cnt;
// int ics,ids,is,ipet,it,Nconv;
int ics,ids,is,ipet,it,Nconv,iv;
// double CorrF,Diff,rr;
double CorrF,Diff,rr,tCorrF;
// unsigned long flops;
// double tt;
latsize[0] = 0;
latsize[1] = 0;
latsize[2] = 0;
latsize[3] = 0;
// netDim = 4;
netSize[0] = 0;
netSize[1] = 0;
netSize[2] = 0;
netSize[3] = 0;
for(i=1;i<argc;i++){
if(argv[i][0] == 'L'){
t = 0;
for(j=1;j<strlen(argv[i]);j++){
if(argv[i][j] == 'x'){
t++;
}
else if(argv[i][j] >= '0' && argv[i][j] <= '9'){
latsize[t] = 10*latsize[t] + (int)(argv[i][j] - '0');
}
}
}
else if(argv[i][0] == 'P'){
t = 0;
for(j=1;j<strlen(argv[i]);j++){
if(argv[i][j] == 'x'){
t++;
}
else if(argv[i][j] >= '0' && argv[i][j] <= '9'){
netSize[t] = 10*netSize[t] + (int)(argv[i][j] - '0');
}
}
}
else if(argv[i][0] == 'G'){
ngpus = (int)(argv[i][1] - '0');
}
}
t = 0;
for(i=0;i<4;i++){
if(latsize[0] == 0){
t++;
}
}
if(t > 0){
latsize[0] = QCD_NX;
latsize[1] = QCD_NY;
latsize[2] = QCD_NZ;
latsize[3] = QCD_NT;
}
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
checkCudaErrors(cudaSetDevice(myrank % ngpus));
npIn = 1;
for(i=0;i<4;i++){
npIn *= netSize[i];
//debug
/* printf("netSize[%d] == %d\n", i, netSize[i]); */
}
if(npIn != nprocs){
if(myrank == 0){
printf("Number of processes is invalid\n");
}
return 0;
}
nsite = 1;
for(i=0;i<4;i++){
localsize[i] = latsize[i] / netSize[i];
nsite *= localsize[i];
}
t = myrank;
for(i=0;i<4;i++){
netPos[i] = t % netSize[i];
t /= netSize[i];
}
QCDDopr_Init(localsize[0],localsize[1],localsize[2],localsize[3],netSize[0],netSize[1],netSize[2],netSize[3],myrank);
if(myrank == 0){
printf("=============================================\n");
printf("QCD base MPI program\n");
printf(" Lattice size = %dx%dx%dx%d\n",latsize[0],latsize[1],latsize[2],latsize[3]);
printf("Decomposed by %d procs : %dx%dx%dx%d\n",nprocs,netSize[0],netSize[1],netSize[2],netSize[3]);
printf(" Local Lattice size = %dx%dx%dx%d\n",localsize[0],localsize[1],localsize[2],localsize[3]);
printf("\n Cks = %f\n",Cks);
printf("=============================================\n");
}
pGauge = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512);
uinit((QCDReal*)pGauge,latsize[0],latsize[1],latsize[2],latsize[3]);
checkCudaErrors(cudaMalloc((void**)&dpGauge, sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512));
checkCudaErrors(cudaMemcpy(dpGauge, pGauge, sizeof(QCDComplex) * QCD_MATRIX_SIZE * 4 * nsite + 512, cudaMemcpyHostToDevice));
pSrc = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128);
pDest = (QCDComplex*)malloc(sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128);
checkCudaErrors(cudaMalloc((void**)&dpSrc, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128));
checkCudaErrors(cudaMalloc((void**)&dpDest, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128));
pCorr = (QCDReal*)malloc(sizeof(QCDReal) * latsize[3]);
for(i=0;i<latsize[3];i++){
pCorr[i] = 0.0;
}
ttotal = 0.0;
/* for(ics=0;ics<QCD_NCOL;ics++){ */
/* for(ids=0;ids<QCD_ND;ids++){ */
for(ics=0;ics<1;ics++){
for(ids=0;ids<1;ids++){
set_src(ids,ics,pSrc,0);
checkCudaErrors(cudaMemcpy(dpSrc, pSrc, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128, cudaMemcpyHostToDevice));
MPI_Barrier(MPI_COMM_WORLD);
tstart = mysecond();
// Solve_CG(pDest,pGauge,pSrc,Cks,Enorm,&Nconv,&Diff);
cuSolve_CG(dpDest,dpGauge,dpSrc,Cks,Enorm,&Nconv,&Diff);
MPI_Barrier(MPI_COMM_WORLD);
tend = mysecond() - tstart;
ttotal += tend;
checkCudaErrors(cudaMemcpy(pDest, dpDest, sizeof(QCDComplex) * QCD_SPINOR_SIZE * nsite + 128, cudaMemcpyDeviceToHost));
if(myrank == 0){
printf(" %3d %3d %6d %12.4e ... %f sec\n", ics, ids, Nconv, Diff,tend);
}
for(i=0;i<latsize[3];i++){
ipet = i/localsize[3];
it = i % localsize[3];
if(ipet == netPos[3]){
is = it*localsize[0]*localsize[1]*localsize[2];
// QCDLA_Norm(&CorrF,(double*)(pDest + is),localsize[0]*localsize[1]*localsize[2]);
CorrF = 0.0;
for (iv = 0; iv < QCD_SPINOR_SIZE; iv++) {
QCDLA_Norm_Simple(&tCorrF,(double*)(pDest + is + iv * qcdNsite), 2*localsize[0]*localsize[1]*localsize[2]);
CorrF += tCorrF;
}
}
else{
CorrF = 0.0;
}
MPI_Allreduce(&CorrF,&rr,1,MPI_DOUBLE_PRECISION,MPI_SUM,MPI_COMM_WORLD);
pCorr[i] = pCorr[i] + rr;
}
}
}
if(myrank == 0){
printf("\nPs meson correlator:\n");
for(i=0;i<latsize[3];i++){
printf("%d: %0.8E\n",i,pCorr[i]);
}
printf("\n Avg. Solver Time = %f [sec]\n",ttotal / 12);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
//debug
/* printf("finish\n"); */
return 0;
}
|
578a80c4589a0e85fbc75051ae90b545086a06ea.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/elu_layer.hpp"
#include "HugeCTR/include/layers/element_wise_function.hpp"
#include <algorithm>
#include <functional>
#include "HugeCTR/include/utils.hpp"
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
EluLayer::EluLayer(Tensor<float>& in_tensor, Tensor<float>& out_tensor, float alpha, int device_id)
: Layer(device_id), alpha_(alpha) {
assert(get_size_from_dims(in_tensor.get_dims()) == get_size_from_dims(out_tensor.get_dims()));
in_tensors_.push_back(std::ref(in_tensor));
out_tensors_.push_back(std::ref(out_tensor));
}
void EluLayer::fprop(hipStream_t stream) {
const Tensor<float>& in_tensor = in_tensors_[0];
Tensor<float>& out_tensor = out_tensors_[0];
float alpha = alpha_;
auto fop = [alpha] __device__(float in) { return (in < 0) ? alpha * (expf(in) - 1) : in; };
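// ELU: f(x) = x for x > 0 and alpha*(exp(x)-1) otherwise; fop applies it element-wise.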
internal::ElementWiseFunctor functor;
functor.forward_evaluate(in_tensor, out_tensor, get_device_id(), fop, stream);
}
void EluLayer::bprop(hipStream_t stream) {
Tensor<float>& in_tensor = in_tensors_[0];
const Tensor<float>& out_tensor = out_tensors_[0];
float alpha = alpha_;
auto bop = [alpha] __device__(float d_out, float d_in) {
return (d_in < 0) ? alpha * expf(d_in) * d_out : d_out;
};
internal::ElementWiseFunctor functor;
functor.backward_evaluate(in_tensor, out_tensor, get_device_id(), bop, stream);
}
} // namespace HugeCTR
| 578a80c4589a0e85fbc75051ae90b545086a06ea.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/elu_layer.hpp"
#include "HugeCTR/include/layers/element_wise_function.hpp"
#include <algorithm>
#include <functional>
#include "HugeCTR/include/utils.hpp"
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
EluLayer::EluLayer(Tensor<float>& in_tensor, Tensor<float>& out_tensor, float alpha, int device_id)
: Layer(device_id), alpha_(alpha) {
assert(get_size_from_dims(in_tensor.get_dims()) == get_size_from_dims(out_tensor.get_dims()));
in_tensors_.push_back(std::ref(in_tensor));
out_tensors_.push_back(std::ref(out_tensor));
}
void EluLayer::fprop(cudaStream_t stream) {
const Tensor<float>& in_tensor = in_tensors_[0];
Tensor<float>& out_tensor = out_tensors_[0];
float alpha = alpha_;
auto fop = [alpha] __device__(float in) { return (in < 0) ? alpha * (expf(in) - 1) : in; };
internal::ElementWiseFunctor functor;
functor.forward_evaluate(in_tensor, out_tensor, get_device_id(), fop, stream);
}
void EluLayer::bprop(cudaStream_t stream) {
Tensor<float>& in_tensor = in_tensors_[0];
const Tensor<float>& out_tensor = out_tensors_[0];
float alpha = alpha_;
auto bop = [alpha] __device__(float d_out, float d_in) {
return (d_in < 0) ? alpha * expf(d_in) * d_out : d_out;
};
internal::ElementWiseFunctor functor;
functor.backward_evaluate(in_tensor, out_tensor, get_device_id(), bop, stream);
}
} // namespace HugeCTR
|
e57d97a0181dce8dc9043c0211eea643c84efdd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <system/op_boilerplate.h>
#include <loops/reduce_same.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include <execution/LaunchContext.h>
#include <exceptions/cuda_exception.h>
#include <loops/scalar.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) {
functions::reduce::ReduceSameFunction<X>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ void simpleScalar(void const* x, Nd4jLong const* xShapeInfo,
void *extraParams,
void *z, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
functions::reduce::ReduceSameFunction<X>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = static_cast<X*>(vsPartials);
auto extraParams = static_cast<X*>(vextraParams);
Nd4jLong floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *vextraParams, void *vreductionBuffer,
void *vz, const Nd4jLong *zShapeInfo) {
auto x = reinterpret_cast<X const*>(vx);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);
// if (OpType::requiresSpecialAccumulation) {
// OpType::execSpecialCuda(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
// return;
// }
//shared memory space for storing intermediate results
__shared__ X sPartials[CUDA_BLOCK_SIZE];
__shared__ int tadLen, numTads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
tadLen = shape::length(innerXTadShapeInfo);
numTads = shape::length(outerXTadShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
shape::index2coords(r, outerXTadShapeInfo, coords);
const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
const X* xTad = x + outerOffset;
sPartials[threadIdx.x] = OpType::startingValue(xTad);
for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
__syncthreads();
// aggregate. do NOT reduce for elements > tadLen
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams);
__syncthreads();
if (threadIdx.x == 0)
z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void ReduceSameFunction<X>::execScalarCudaLegacy(int opNum, void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong const* zShapeInfo,
void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_T(execScalarCuda, PARAMS(vx, xShapeInfo, vextraParams, vz, zShapeInfo, vreductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::execScalarCuda(void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void * vz, Nd4jLong const* zShapeInfo,
void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo) {
auto x = reinterpret_cast<X const*>(vx);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ X sPartials[CUDA_BLOCK_SIZE];
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
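// Cross-block reduction: every block stores its partial result in reductionBuffer;
// the last block to increment the ticket counter at tc[16384] folds all partials
// into the final scalar.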
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionBuffer;
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template<typename OpType>
__host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, hipStream_t *stream,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) {
if(shape::isEmpty(hXShapeInfo)) {
if(shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<X>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(X), hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceSameFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<X, X, X>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
}
else {
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank);
hipLaunchKernelGGL(( simpleReduce<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template<typename OpType>
__host__ void ReduceSameFunction<X>::intermediateScalar(dim3 launchDims, hipStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<X>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = hipMemcpyAsync(z, &startingVal, sizeof(X), hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceSameFunction<X>::intermediateScalar: failed to copy resulting scalar", res);
}
else {
hipLaunchKernelGGL(( simpleScalar<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
_CUDA_H void ReduceSameFunction<X>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_T(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X>
_CUDA_H void ReduceSameFunction<X>::execReduceXD(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) {
if(shape::length(hZShapeInfo) == 1) {
ReduceSameFunction<X>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
}
else {
DISPATCH_BY_OPNUM_T(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), REDUCE_SAME_OPS);
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *) *sPartials;
for (int i = 0; i < sPartialsLength; i++)
sPartialsDeref[i] = extraParams[0];
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ReduceSameFunction, , LIBND4J_TYPES);
}
} | e57d97a0181dce8dc9043c0211eea643c84efdd5.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <system/op_boilerplate.h>
#include <loops/reduce_same.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include <execution/LaunchContext.h>
#include <exceptions/cuda_exception.h>
#include <loops/scalar.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) {
functions::reduce::ReduceSameFunction<X>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ void simpleScalar(void const* x, Nd4jLong const* xShapeInfo,
void *extraParams,
void *z, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
functions::reduce::ReduceSameFunction<X>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = static_cast<X*>(vsPartials);
auto extraParams = static_cast<X*>(vextraParams);
Nd4jLong floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *vextraParams, void *vreductionBuffer,
void *vz, const Nd4jLong *zShapeInfo) {
auto x = reinterpret_cast<X const*>(vx);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);
// if (OpType::requiresSpecialAccumulation) {
// OpType::execSpecialCuda(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
// return;
// }
//shared memory space for storing intermediate results
__shared__ X sPartials[CUDA_BLOCK_SIZE];
__shared__ int tadLen, numTads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
tadLen = shape::length(innerXTadShapeInfo);
numTads = shape::length(outerXTadShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
shape::index2coords(r, outerXTadShapeInfo, coords);
const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
const X* xTad = x + outerOffset;
sPartials[threadIdx.x] = OpType::startingValue(xTad);
for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
__syncthreads();
// aggregate. do NOT reduce for elements > tadLen
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams);
__syncthreads();
if (threadIdx.x == 0)
z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void ReduceSameFunction<X>::execScalarCudaLegacy(int opNum, void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong const* zShapeInfo,
void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_T(execScalarCuda, PARAMS(vx, xShapeInfo, vextraParams, vz, zShapeInfo, vreductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ReduceSameFunction<X>::execScalarCuda(void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void * vz, Nd4jLong const* zShapeInfo,
void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo) {
auto x = reinterpret_cast<X const*>(vx);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ X sPartials[CUDA_BLOCK_SIZE];
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionBuffer;
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template<typename OpType>
__host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, cudaStream_t *stream,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) {
if(shape::isEmpty(hXShapeInfo)) {
if(shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<X>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(X), cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceSameFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<X, X, X>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
}
else {
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank);
simpleReduce<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
template<typename OpType>
__host__ void ReduceSameFunction<X>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<X>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = cudaMemcpyAsync(z, &startingVal, sizeof(X), cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceSameFunction<X>::intermediateScalar: failed to copy resulting scalar", res);
}
else {
simpleScalar<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X>
_CUDA_H void ReduceSameFunction<X>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_T(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS);
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarSame(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X>
_CUDA_H void ReduceSameFunction<X>::execReduceXD(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) {
if(shape::length(hZShapeInfo) == 1) {
ReduceSameFunction<X>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
}
else {
DISPATCH_BY_OPNUM_T(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), REDUCE_SAME_OPS);
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *) *sPartials;
for (int i = 0; i < sPartialsLength; i++)
sPartialsDeref[i] = extraParams[0];
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ReduceSameFunction, , LIBND4J_TYPES);
}
} |
97dac78dacc17585112af10e4b699b8c554208ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MakeSplits_Large.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int baseAggregateIdx = 1;
int *splitting = NULL;
hipMalloc(&splitting, XSIZE*YSIZE);
int *aggregation = NULL;
hipMalloc(&aggregation, XSIZE*YSIZE);
int *aggMapAdjIndices = NULL;
hipMalloc(&aggMapAdjIndices, XSIZE*YSIZE);
int *aggMapAdjacency = NULL;
hipMalloc(&aggMapAdjacency, XSIZE*YSIZE);
int *adjIndices = NULL;
hipMalloc(&adjIndices, XSIZE*YSIZE);
int *adjacency = NULL;
hipMalloc(&adjacency, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
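// Editor's note: the two while-loops above simply round iXSIZE/iYSIZE up to the
// next multiple of BLOCKX/BLOCKY before computing the grid.  A minimal equivalent
// sketch using the usual ceil-division idiom (gridX/gridY are illustrative names,
// not used elsewhere in this file):
//   int gridX = (XSIZE + BLOCKX - 1) / BLOCKX;  // == iXSIZE / BLOCKX below
//   int gridY = (YSIZE + BLOCKY - 1) / BLOCKY;  // == iYSIZE / BLOCKY below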
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((MakeSplits_Large), dim3(gridBlock), dim3(threadBlock), 0, 0, baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((MakeSplits_Large), dim3(gridBlock), dim3(threadBlock), 0, 0, baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((MakeSplits_Large), dim3(gridBlock), dim3(threadBlock), 0, 0, baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 97dac78dacc17585112af10e4b699b8c554208ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MakeSplits_Large.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int baseAggregateIdx = 1;
int *splitting = NULL;
cudaMalloc(&splitting, XSIZE*YSIZE);
int *aggregation = NULL;
cudaMalloc(&aggregation, XSIZE*YSIZE);
int *aggMapAdjIndices = NULL;
cudaMalloc(&aggMapAdjIndices, XSIZE*YSIZE);
int *aggMapAdjacency = NULL;
cudaMalloc(&aggMapAdjacency, XSIZE*YSIZE);
int *adjIndices = NULL;
cudaMalloc(&adjIndices, XSIZE*YSIZE);
int *adjacency = NULL;
cudaMalloc(&adjacency, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
MakeSplits_Large<<<gridBlock,threadBlock>>>(baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MakeSplits_Large<<<gridBlock,threadBlock>>>(baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MakeSplits_Large<<<gridBlock,threadBlock>>>(baseAggregateIdx,splitting,aggregation,aggMapAdjIndices,aggMapAdjacency,adjIndices,adjacency);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b0899698ac99af57e8a95e33878fbdeab64f1c9e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nvalue;
int* values;
};
typedef struct GpuConstantsPackage dataPack;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ dataPack dPk;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// kWarpPrefixSum: kernel for making a prefix sum of 32 numbers
//-----------------------------------------------------------------------------
__global__ void kWarpPrefixSum()
{
if (threadIdx.x == 0) {
printf("Values =\n");
int i, j;
for (i = 0; i < 4; i++) {
printf(" ");
for (j = 8*i; j < 8*(i+1); j++) {
printf("%4d ", dPk.values[j]);
}
printf(" [ slots %2d - %2d ]\n", 8*i, 8*(i+1)-1);
}
}
}
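// Editor's note: the banner above promises a prefix sum over the 32 values, but
// the body currently only prints them.  A minimal warp-level inclusive scan
// sketch (assumes a single warp of 32 threads; uses HIP's __shfl_up, which
// corresponds to __shfl_up_sync(0xffffffff, ...) in CUDA 9+; not part of the
// original exercise):
//
//   int v = dPk.values[threadIdx.x];
//   for (int offset = 1; offset < 32; offset <<= 1) {
//     int up = __shfl_up(v, offset);        // value held by lane (lane - offset)
//     if ((int)threadIdx.x >= offset) {
//       v += up;                            // accumulate running sum
//     }
//   }
//   dPk.values[threadIdx.x] = v;            // slot i now holds values[0] + ... + values[i]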
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(int),
hipHostMallocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
hipMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
hipMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
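// Editor's note: because the pinned allocation above passes hipHostMallocMapped,
// the host buffer is also mapped into the device's address space.  A minimal
// sketch of using that mapping directly (devView is an illustrative name; the
// rest of this file sticks to explicit Upload/Download copies instead):
//
//   int *devView = NULL;
//   hipHostGetDevicePointer((void **)&devView, G.HostData, 0);
//   // devView may now be passed to a kernel; reads/writes go to G.HostData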
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
          hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(float),
hipHostMallocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
hipMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
hipMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
          hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt ivals;
// Create a small array of integers and populate it
ivals = CreateGpuInt(32, 1);
// Initialize random number generator
srand(29538);
// Create random numbers
np = 32;
for (i = 0; i < np; i++) {
ivals.HostData[i] = (int)(100 * (double)rand() / (double)RAND_MAX);
}
// Stage critical constants--see the dataPack struct defined above.
dataPack dpstage;
dpstage.nvalue = np;
dpstage.values = ivals.DevcData;
// Upload all data to the device
UploadGpuInt(&ivals);
// Upload the constants to the constants cache
hipMemcpyToSymbol(dPk, &dpstage, sizeof(dataPack));
// Launch the kernel in more than one block
hipLaunchKernelGGL(( kWarpPrefixSum), dim3(1), dim3(32), 0, 0, );
// Device synchronization
hipDeviceSynchronize();
return 0;
}
| b0899698ac99af57e8a95e33878fbdeab64f1c9e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nvalue;
int* values;
};
typedef struct GpuConstantsPackage dataPack;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ dataPack dPk;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// kWarpPrefixSum: kernel for making a prefix sum of 32 numbers
//-----------------------------------------------------------------------------
__global__ void kWarpPrefixSum()
{
if (threadIdx.x == 0) {
printf("Values =\n");
int i, j;
for (i = 0; i < 4; i++) {
printf(" ");
for (j = 8*i; j < 8*(i+1); j++) {
printf("%4d ", dPk.values[j]);
}
printf(" [ slots %2d - %2d ]\n", 8*i, 8*(i+1)-1);
}
}
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(int),
cudaHostAllocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
cudaMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
           cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(float),
cudaHostAllocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
cudaMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
           cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt ivals;
// Create a small array of integers and populate it
ivals = CreateGpuInt(32, 1);
// Initialize random number generator
srand(29538);
// Create random numbers
np = 32;
for (i = 0; i < np; i++) {
ivals.HostData[i] = (int)(100 * (double)rand() / (double)RAND_MAX);
}
// Stage critical constants--see the dataPack struct defined above.
dataPack dpstage;
dpstage.nvalue = np;
dpstage.values = ivals.DevcData;
// Upload all data to the device
UploadGpuInt(&ivals);
// Upload the constants to the constants cache
cudaMemcpyToSymbol(dPk, &dpstage, sizeof(dataPack));
// Launch the kernel in more than one block
kWarpPrefixSum<<<1, 32>>>();
// Device synchronization
cudaDeviceSynchronize();
return 0;
}
|
beff458690822cc061607e8d6d92ed56f85b7ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void column_sum(const float* data, float* sum, int nx, int ny, int num_threads, int offset ) {
float s = 0.0;
const uint idx = threadIdx.x + blockIdx.x*num_threads+offset;
for(int i =0; i < ny; i++) {
s += data[idx + i*nx];
}
sum[idx] = s;
} | beff458690822cc061607e8d6d92ed56f85b7ad6.cu | #include "includes.h"
__global__ void column_sum(const float* data, float* sum, int nx, int ny, int num_threads, int offset ) {
float s = 0.0;
const uint idx = threadIdx.x + blockIdx.x*num_threads+offset;
for(int i =0; i < ny; i++) {
s += data[idx + i*nx];
}
sum[idx] = s;
} |
aa436d38c524572e605193353ac077195309553f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shared_memory.cuh"
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
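// Editor's note: a minimal launch sketch for the two kernels above (d_d and n
// are illustrative; n must be <= 64 for staticReverse's fixed s[64]).  The third
// hipLaunchKernelGGL argument sizes the extern __shared__ array in dynamicReverse:
//
//   hipLaunchKernelGGL(staticReverse,  dim3(1), dim3(n), 0,               0, d_d, n);
//   hipLaunchKernelGGL(dynamicReverse, dim3(1), dim3(n), n * sizeof(int), 0, d_d, n);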
| aa436d38c524572e605193353ac077195309553f.cu | #include "shared_memory.cuh"
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
__syncthreads();
d[t] = s[tr];
}
|
933eb759365c73b515c71b433e0974efbc19402c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This code is a modified version of NVidia's degridding code:
https://github.com/SKA-ScienceDataProcessor/GPUDegrid/blob/master/degrid_gpu.cu
*/
#include "common.h"
#include "metrix.h"
#include "OskarBinReader.h"
template <int over, bool isbig> struct degridder {};
template <int over> struct degridder<over, true>{
static __device__ __inline__
//__launch_bounds__(256, 6)
// double2 is enough for 'in'
void degrid_kernel(complexd* out, const double3* in, size_t npts, const complexd* img,
size_t img_dim, const complexd* gcf, int gcf_dim) {
//TODO remove hard-coded 32
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
double3 inn = in[n+q];
int sub_x = floorf(over*(inn.x-floorf(inn.x)));
int sub_y = floorf(over*(inn.y-floorf(inn.y)));
int main_x = floorf(inn.x);
int main_y = floorf(inn.y);
double sum_r = 0.0;
double sum_i = 0.0;
for(int a = threadIdx.x-gcf_dim/2;a<(gcf_dim+1)/2;a+=blockDim.x)
for(int b = -gcf_dim/2;b<(gcf_dim+1)/2;b++)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = img[main_x+a+img_dim*(main_y+b)].x;
auto i1 = img[main_x+a+img_dim*(main_y+b)].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= img_dim || main_y+b >= img_dim) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(OVER*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].y);
sum_r += r1*r2 - i1*i2;
sum_i += r1*i2 + r2*i1;
}
for(unsigned int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
complexd tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
// Add rotation
out[n+q] = rotw(tmp, inn.z);
}
}
}
}
};
template <int over> struct degridder<over, false>{
static __device__ __inline__
//__launch_bounds__(256, 6)
void degrid_kernel(complexd* out, const double3* in, size_t npts, const complexd* img,
size_t img_dim, const complexd* gcf, int gcf_dim) {
//TODO remove hard-coded 32
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
double3 inn = in[n+q];
int sub_x = floorf(over*(inn.x-floorf(inn.x)));
int sub_y = floorf(over*(inn.y-floorf(inn.y)));
int main_x = floorf(inn.x);
int main_y = floorf(inn.y);
double sum_r = 0.0;
double sum_i = 0.0;
int a = -gcf_dim/2 + int(threadIdx.x)%gcf_dim;
for(int b = -gcf_dim/2+int(threadIdx.x)/gcf_dim;b<gcf_dim/2;b+=blockDim.x/gcf_dim)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = img[main_x+a+img_dim*(main_y+b)].x;
auto i1 = img[main_x+a+img_dim*(main_y+b)].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= img_dim || main_y+b >= img_dim) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(OVER*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].y);
sum_r += r1*r2 - i1*i2;
sum_i += r1*i2 + r2*i1;
}
for(unsigned int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
complexd tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
// Add rotation
out[n+q] = rotw(tmp, inn.z);
}
}
}
}
};
#define __PADAPI static __inline__ __host__ __device__
template <int max_gcf_size> struct degridPadder {
__PADAPI int full_padded_size(int bare_lin_size){
return bare_lin_size*(bare_lin_size+2*max_gcf_size)+2*max_gcf_size;
}
__PADAPI int bare_offset(int bare_lin_size){
return (bare_lin_size+1)*max_gcf_size;
}
};
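// Editor's note: a small worked example of the padding arithmetic above
// (numbers are illustrative).  With max_gcf_size = 256 and bare_lin_size = 2048:
//   full_padded_size = 2048*(2048 + 2*256) + 2*256 = 2048*2560 + 512 = 5243392
//   bare_offset      = (2048 + 1)*256              = 524544
// i.e. the un-padded image starts 524544 elements into the padded buffer, so the
// speculative reads at main_x+a / main_y+b (masked to zero afterwards) still land
// inside the allocation.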
template <
int max_gcf_dim
, int over
, bool isBig
>
__global__
void degridGPU(
const BlWMap permutations[/* baselines */]
, complexd * out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, size_t timesteps_x_channels
, size_t img_dim
, int blOff
) {
// Temporarily use the second threadblock-grid dimension as baseline dimension
int bl = permutations[blockIdx.y + blOff].bl;
int gcf_dim = get_supp(permutations[bl].wp);
degridder<over, isBig>::degrid_kernel(
out_vis + bl*timesteps_x_channels
, uvw + bl*timesteps_x_channels
, timesteps_x_channels
, img+degridPadder<max_gcf_dim>::bare_offset(img_dim)
, img_dim
, gcf[permutations[bl].wp]
, gcf_dim
);
}
// We assume permutations vector
// is sorted against abs(w_plane_index) compare function
template <
int max_gcf_dim
, int over
>
void degridGPU(
const BlWMap permutations[/* baselines */]
, complexd* out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, int baselines
, int timesteps_x_channels
, int img_dim
) {
int bl;
for(bl = 0; bl < baselines; bl++) {
if(get_supp(permutations[bl].wp) > 16) break;
}
dim3 griddims_small = dim3(timesteps_x_channels/32, bl);
hipLaunchKernelGGL(( degridGPU<max_gcf_dim, over, false>), dim3(griddims_small), dim3(dim3(32,32)), 0, 0, permutations, out_vis, uvw, gcf, img, timesteps_x_channels, img_dim, 0);
if (bl != baselines - 1) {
dim3 griddims_big = dim3(timesteps_x_channels/32, baselines - bl);
hipLaunchKernelGGL(( degridGPU<max_gcf_dim, over, true>), dim3(griddims_big), dim3(dim3(32,8)), 0, 0, permutations, out_vis, uvw, gcf, img, timesteps_x_channels, img_dim, bl);
}
}
extern "C" void test(
const BlWMap permutations[/* baselines */]
, complexd* out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, int baselines
, int timesteps_x_channels
, int img_dim
)
{
degridGPU<256, 8>(permutations, out_vis, uvw, gcf, img, baselines, timesteps_x_channels, img_dim);
}
| 933eb759365c73b515c71b433e0974efbc19402c.cu | /*
This code is a modified version of NVidia's degridding code:
https://github.com/SKA-ScienceDataProcessor/GPUDegrid/blob/master/degrid_gpu.cu
*/
#include "common.h"
#include "metrix.h"
#include "OskarBinReader.h"
template <int over, bool isbig> struct degridder {};
template <int over> struct degridder<over, true>{
static __device__ __inline__
//__launch_bounds__(256, 6)
// double2 is enough for 'in'
void degrid_kernel(complexd* out, const double3* in, size_t npts, const complexd* img,
size_t img_dim, const complexd* gcf, int gcf_dim) {
//TODO remove hard-coded 32
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
double3 inn = in[n+q];
int sub_x = floorf(over*(inn.x-floorf(inn.x)));
int sub_y = floorf(over*(inn.y-floorf(inn.y)));
int main_x = floorf(inn.x);
int main_y = floorf(inn.y);
double sum_r = 0.0;
double sum_i = 0.0;
for(int a = threadIdx.x-gcf_dim/2;a<(gcf_dim+1)/2;a+=blockDim.x)
for(int b = -gcf_dim/2;b<(gcf_dim+1)/2;b++)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = img[main_x+a+img_dim*(main_y+b)].x;
auto i1 = img[main_x+a+img_dim*(main_y+b)].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= img_dim || main_y+b >= img_dim) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(OVER*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].y);
sum_r += r1*r2 - i1*i2;
sum_i += r1*i2 + r2*i1;
}
for(unsigned int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
complexd tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
// Add rotation
out[n+q] = rotw(tmp, inn.z);
}
}
}
}
};
template <int over> struct degridder<over, false>{
static __device__ __inline__
//__launch_bounds__(256, 6)
void degrid_kernel(complexd* out, const double3* in, size_t npts, const complexd* img,
size_t img_dim, const complexd* gcf, int gcf_dim) {
//TODO remove hard-coded 32
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
double3 inn = in[n+q];
int sub_x = floorf(over*(inn.x-floorf(inn.x)));
int sub_y = floorf(over*(inn.y-floorf(inn.y)));
int main_x = floorf(inn.x);
int main_y = floorf(inn.y);
double sum_r = 0.0;
double sum_i = 0.0;
int a = -gcf_dim/2 + int(threadIdx.x)%gcf_dim;
for(int b = -gcf_dim/2+int(threadIdx.x)/gcf_dim;b<gcf_dim/2;b+=blockDim.x/gcf_dim)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = img[main_x+a+img_dim*(main_y+b)].x;
auto i1 = img[main_x+a+img_dim*(main_y+b)].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= img_dim || main_y+b >= img_dim) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(OVER*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(over*sub_y+sub_x) +
gcf_dim*b+a].y);
sum_r += r1*r2 - i1*i2;
sum_i += r1*i2 + r2*i1;
}
for(unsigned int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
complexd tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
// Add rotation
out[n+q] = rotw(tmp, inn.z);
}
}
}
}
};
#define __PADAPI static __inline__ __host__ __device__
template <int max_gcf_size> struct degridPadder {
__PADAPI int full_padded_size(int bare_lin_size){
return bare_lin_size*(bare_lin_size+2*max_gcf_size)+2*max_gcf_size;
}
__PADAPI int bare_offset(int bare_lin_size){
return (bare_lin_size+1)*max_gcf_size;
}
};
template <
int max_gcf_dim
, int over
, bool isBig
>
__global__
void degridGPU(
const BlWMap permutations[/* baselines */]
, complexd * out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, size_t timesteps_x_channels
, size_t img_dim
, int blOff
) {
// Temporarily use the second threadblock-grid dimension as baseline dimension
int bl = permutations[blockIdx.y + blOff].bl;
int gcf_dim = get_supp(permutations[bl].wp);
degridder<over, isBig>::degrid_kernel(
out_vis + bl*timesteps_x_channels
, uvw + bl*timesteps_x_channels
, timesteps_x_channels
, img+degridPadder<max_gcf_dim>::bare_offset(img_dim)
, img_dim
, gcf[permutations[bl].wp]
, gcf_dim
);
}
// We assume permutations vector
// is sorted against abs(w_plane_index) compare function
template <
int max_gcf_dim
, int over
>
void degridGPU(
const BlWMap permutations[/* baselines */]
, complexd* out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, int baselines
, int timesteps_x_channels
, int img_dim
) {
int bl;
for(bl = 0; bl < baselines; bl++) {
if(get_supp(permutations[bl].wp) > 16) break;
}
dim3 griddims_small = dim3(timesteps_x_channels/32, bl);
degridGPU<max_gcf_dim, over, false><<<griddims_small, dim3(32,32)>>>(permutations, out_vis, uvw, gcf, img, timesteps_x_channels, img_dim, 0);
if (bl != baselines - 1) {
dim3 griddims_big = dim3(timesteps_x_channels/32, baselines - bl);
degridGPU<max_gcf_dim, over, true><<<griddims_big, dim3(32,8)>>>(permutations, out_vis, uvw, gcf, img, timesteps_x_channels, img_dim, bl);
}
}
extern "C" void test(
const BlWMap permutations[/* baselines */]
, complexd* out_vis
// Padded, points to the beginning
// of physical data
, const double3 * uvw
// centered in 0 w-plane
, const complexd * gcf[]
, const complexd * img
, int baselines
, int timesteps_x_channels
, int img_dim
)
{
degridGPU<256, 8>(permutations, out_vis, uvw, gcf, img, baselines, timesteps_x_channels, img_dim);
}
|
ef4440b0443590d9212241c6262f7784c8bfccf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/detection/bbox_util.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
namespace {
template <typename T>
static std::pair<Tensor, Tensor> ProposalForOneImage(
const platform::CUDADeviceContext &ctx, const Tensor &im_shape,
const Tensor &anchors, const Tensor &variances,
const Tensor &bbox_deltas, // [M, 4]
const Tensor &scores, // [N, 1]
int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size,
float eta, bool pixel_offset) {
// 1. pre nms
Tensor scores_sort, index_sort;
SortDescending<T>(ctx, scores, &scores_sort, &index_sort);
int num = scores.numel();
int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
: pre_nms_top_n;
scores_sort.Resize({pre_nms_num, 1});
index_sort.Resize({pre_nms_num, 1});
// 2. box decode and clipping
Tensor proposals;
proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace());
{
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
for_range(BoxDecodeAndClipFunctor<T>{
anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(),
index_sort.data<int>(), im_shape.data<T>(), proposals.data<T>(),
pixel_offset});
}
// 3. filter
Tensor keep_index, keep_num_t;
keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
min_size = ::max(min_size, 1.0f);
auto stream = ctx.stream();
hipLaunchKernelGGL(( FilterBBoxes<T, 512>), dim3(1), dim3(512), 0, stream,
proposals.data<T>(), im_shape.data<T>(), min_size, pre_nms_num,
keep_num_t.data<int>(), keep_index.data<int>(), false, pixel_offset);
int keep_num;
const auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
memory::Copy(platform::CPUPlace(), &keep_num, gpu_place,
keep_num_t.data<int>(), sizeof(int), ctx.stream());
ctx.Wait();
keep_index.Resize({keep_num});
Tensor scores_filter, proposals_filter;
// Handle the case when there is no keep index left
if (keep_num == 0) {
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
proposals_filter.mutable_data<T>({1, 4}, ctx.GetPlace());
scores_filter.mutable_data<T>({1, 1}, ctx.GetPlace());
set_zero(ctx, &proposals_filter, static_cast<T>(0));
set_zero(ctx, &scores_filter, static_cast<T>(0));
return std::make_pair(proposals_filter, scores_filter);
}
proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace());
scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
if (nms_thresh <= 0) {
return std::make_pair(proposals_filter, scores_filter);
}
// 4. nms
Tensor keep_nms;
NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms,
pixel_offset);
if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
keep_nms.Resize({post_nms_top_n});
}
Tensor scores_nms, proposals_nms;
proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace());
scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
template <typename DeviceContext, typename T>
class CUDAGenerateProposalsV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *scores = context.Input<Tensor>("Scores");
auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
auto *im_shape = context.Input<Tensor>("ImShape");
auto anchors = GET_DATA_SAFELY(context.Input<Tensor>("Anchors"), "Input",
"Anchors", "GenerateProposals");
auto variances = GET_DATA_SAFELY(context.Input<Tensor>("Variances"),
"Input", "Variances", "GenerateProposals");
auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
int post_nms_top_n = context.Attr<int>("post_nms_topN");
float nms_thresh = context.Attr<float>("nms_thresh");
float min_size = context.Attr<float>("min_size");
float eta = context.Attr<float>("eta");
bool pixel_offset = context.Attr<bool>("pixel_offset");
PADDLE_ENFORCE_GE(eta, 1.,
platform::errors::InvalidArgument(
"Not support adaptive NMS. The attribute 'eta' "
"should not less than 1. But received eta=[%d]",
eta));
auto &dev_ctx = context.template device_context<DeviceContext>();
auto scores_dim = scores->dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
int64_t h_score = scores_dim[2];
int64_t w_score = scores_dim[3];
auto bbox_dim = bbox_deltas->dims();
int64_t c_bbox = bbox_dim[1];
int64_t h_bbox = bbox_dim[2];
int64_t w_bbox = bbox_dim[3];
Tensor bbox_deltas_swap, scores_swap;
bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
dev_ctx.GetPlace());
scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
dev_ctx.GetPlace());
math::Transpose<DeviceContext, T, 4> trans;
std::vector<int> axis = {0, 2, 3, 1};
trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
trans(dev_ctx, *scores, &scores_swap, axis);
anchors.Resize({anchors.numel() / 4, 4});
variances.Resize({variances.numel() / 4, 4});
rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4},
context.GetPlace());
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
T *rpn_rois_data = rpn_rois->data<T>();
T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace());
auto cpu_place = platform::CPUPlace();
int64_t num_proposals = 0;
std::vector<size_t> offset(1, 0);
std::vector<int> tmp_num;
for (int64_t i = 0; i < num; ++i) {
Tensor im_shape_slice = im_shape->Slice(i, i + 1);
Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
Tensor scores_slice = scores_swap.Slice(i, i + 1);
bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4});
scores_slice.Resize({h_score * w_score * c_score, 1});
std::pair<Tensor, Tensor> box_score_pair = ProposalForOneImage<T>(
dev_ctx, im_shape_slice, anchors, variances, bbox_deltas_slice,
scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size,
eta, pixel_offset);
Tensor &proposals = box_score_pair.first;
Tensor &scores = box_score_pair.second;
memory::Copy(place, rpn_rois_data + num_proposals * 4, place,
proposals.data<T>(), sizeof(T) * proposals.numel(),
dev_ctx.stream());
memory::Copy(place, rpn_roi_probs_data + num_proposals, place,
scores.data<T>(), sizeof(T) * scores.numel(),
dev_ctx.stream());
dev_ctx.Wait();
num_proposals += proposals.dims()[0];
offset.emplace_back(num_proposals);
tmp_num.push_back(proposals.dims()[0]);
}
if (context.HasOutput("RpnRoisNum")) {
auto *rpn_rois_num = context.Output<Tensor>("RpnRoisNum");
rpn_rois_num->mutable_data<int>({num}, context.GetPlace());
int *num_data = rpn_rois_num->data<int>();
memory::Copy(place, num_data, cpu_place, &tmp_num[0], sizeof(int) * num,
dev_ctx.stream());
rpn_rois_num->Resize({num});
}
framework::LoD lod;
lod.emplace_back(offset);
rpn_rois->set_lod(lod);
rpn_roi_probs->set_lod(lod);
rpn_rois->Resize({num_proposals, 4});
rpn_roi_probs->Resize({num_proposals, 1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(generate_proposals_v2,
ops::CUDAGenerateProposalsV2Kernel<
paddle::platform::CUDADeviceContext, float>);
| ef4440b0443590d9212241c6262f7784c8bfccf4.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/detection/bbox_util.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
namespace {
template <typename T>
static std::pair<Tensor, Tensor> ProposalForOneImage(
const platform::CUDADeviceContext &ctx, const Tensor &im_shape,
const Tensor &anchors, const Tensor &variances,
const Tensor &bbox_deltas, // [M, 4]
const Tensor &scores, // [N, 1]
int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size,
float eta, bool pixel_offset) {
// 1. pre nms
Tensor scores_sort, index_sort;
SortDescending<T>(ctx, scores, &scores_sort, &index_sort);
int num = scores.numel();
int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
: pre_nms_top_n;
scores_sort.Resize({pre_nms_num, 1});
index_sort.Resize({pre_nms_num, 1});
// 2. box decode and clipping
Tensor proposals;
proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace());
{
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
for_range(BoxDecodeAndClipFunctor<T>{
anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(),
index_sort.data<int>(), im_shape.data<T>(), proposals.data<T>(),
pixel_offset});
}
// 3. filter
Tensor keep_index, keep_num_t;
keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
min_size = std::max(min_size, 1.0f);
auto stream = ctx.stream();
FilterBBoxes<T, 512><<<1, 512, 0, stream>>>(
proposals.data<T>(), im_shape.data<T>(), min_size, pre_nms_num,
keep_num_t.data<int>(), keep_index.data<int>(), false, pixel_offset);
int keep_num;
const auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
memory::Copy(platform::CPUPlace(), &keep_num, gpu_place,
keep_num_t.data<int>(), sizeof(int), ctx.stream());
ctx.Wait();
keep_index.Resize({keep_num});
Tensor scores_filter, proposals_filter;
// Handle the case when there is no keep index left
if (keep_num == 0) {
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
proposals_filter.mutable_data<T>({1, 4}, ctx.GetPlace());
scores_filter.mutable_data<T>({1, 1}, ctx.GetPlace());
set_zero(ctx, &proposals_filter, static_cast<T>(0));
set_zero(ctx, &scores_filter, static_cast<T>(0));
return std::make_pair(proposals_filter, scores_filter);
}
proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace());
scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
if (nms_thresh <= 0) {
return std::make_pair(proposals_filter, scores_filter);
}
// 4. nms
Tensor keep_nms;
NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms,
pixel_offset);
if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
keep_nms.Resize({post_nms_top_n});
}
Tensor scores_nms, proposals_nms;
proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace());
scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
template <typename DeviceContext, typename T>
class CUDAGenerateProposalsV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *scores = context.Input<Tensor>("Scores");
auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
auto *im_shape = context.Input<Tensor>("ImShape");
auto anchors = GET_DATA_SAFELY(context.Input<Tensor>("Anchors"), "Input",
"Anchors", "GenerateProposals");
auto variances = GET_DATA_SAFELY(context.Input<Tensor>("Variances"),
"Input", "Variances", "GenerateProposals");
auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
int post_nms_top_n = context.Attr<int>("post_nms_topN");
float nms_thresh = context.Attr<float>("nms_thresh");
float min_size = context.Attr<float>("min_size");
float eta = context.Attr<float>("eta");
bool pixel_offset = context.Attr<bool>("pixel_offset");
PADDLE_ENFORCE_GE(eta, 1.,
platform::errors::InvalidArgument(
"Not support adaptive NMS. The attribute 'eta' "
"should not less than 1. But received eta=[%d]",
eta));
auto &dev_ctx = context.template device_context<DeviceContext>();
auto scores_dim = scores->dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
int64_t h_score = scores_dim[2];
int64_t w_score = scores_dim[3];
auto bbox_dim = bbox_deltas->dims();
int64_t c_bbox = bbox_dim[1];
int64_t h_bbox = bbox_dim[2];
int64_t w_bbox = bbox_dim[3];
Tensor bbox_deltas_swap, scores_swap;
bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
dev_ctx.GetPlace());
scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
dev_ctx.GetPlace());
math::Transpose<DeviceContext, T, 4> trans;
std::vector<int> axis = {0, 2, 3, 1};
trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
trans(dev_ctx, *scores, &scores_swap, axis);
anchors.Resize({anchors.numel() / 4, 4});
variances.Resize({variances.numel() / 4, 4});
rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4},
context.GetPlace());
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
T *rpn_rois_data = rpn_rois->data<T>();
T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace());
auto cpu_place = platform::CPUPlace();
int64_t num_proposals = 0;
std::vector<size_t> offset(1, 0);
std::vector<int> tmp_num;
for (int64_t i = 0; i < num; ++i) {
Tensor im_shape_slice = im_shape->Slice(i, i + 1);
Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
Tensor scores_slice = scores_swap.Slice(i, i + 1);
bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4});
scores_slice.Resize({h_score * w_score * c_score, 1});
std::pair<Tensor, Tensor> box_score_pair = ProposalForOneImage<T>(
dev_ctx, im_shape_slice, anchors, variances, bbox_deltas_slice,
scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size,
eta, pixel_offset);
Tensor &proposals = box_score_pair.first;
Tensor &scores = box_score_pair.second;
memory::Copy(place, rpn_rois_data + num_proposals * 4, place,
proposals.data<T>(), sizeof(T) * proposals.numel(),
dev_ctx.stream());
memory::Copy(place, rpn_roi_probs_data + num_proposals, place,
scores.data<T>(), sizeof(T) * scores.numel(),
dev_ctx.stream());
dev_ctx.Wait();
num_proposals += proposals.dims()[0];
offset.emplace_back(num_proposals);
tmp_num.push_back(proposals.dims()[0]);
}
if (context.HasOutput("RpnRoisNum")) {
auto *rpn_rois_num = context.Output<Tensor>("RpnRoisNum");
rpn_rois_num->mutable_data<int>({num}, context.GetPlace());
int *num_data = rpn_rois_num->data<int>();
memory::Copy(place, num_data, cpu_place, &tmp_num[0], sizeof(int) * num,
dev_ctx.stream());
rpn_rois_num->Resize({num});
}
framework::LoD lod;
lod.emplace_back(offset);
rpn_rois->set_lod(lod);
rpn_roi_probs->set_lod(lod);
rpn_rois->Resize({num_proposals, 4});
rpn_roi_probs->Resize({num_proposals, 1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(generate_proposals_v2,
ops::CUDAGenerateProposalsV2Kernel<
paddle::platform::CUDADeviceContext, float>);
|
a468a52512920945c08e7f6bfd2e9722e77af7bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuASR_helper.h"
#include "cuasr/functional.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <experimental/source_location>
#include <iostream>
namespace {
void check_for_cuda_error([[maybe_unused]] std::experimental::source_location s =
std::experimental::source_location::current()) {
#ifndef NDEBUG
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cerr << "CUDA Error: " << hipGetErrorString(err) << '\n';
std::cerr << " file: " << s.file_name() << '\n'
<< " function: " << s.function_name() << '\n'
<< " line: " << s.line() << "\n\n";
std::exit(1);
}
#endif
}
void set_to_zero_prob(HMM::Mod_prob_t* data, size_t how_much) {
for (size_t i = 0; i < how_much; ++i) {
data[i] = HMM::zero_prob;
}
}
void cuda_matrix_deleter(cuASR_helper::Dev_mat& mat) {
if (mat.data != nullptr) {
hipFree(static_cast<void*>(mat.data));
check_for_cuda_error();
mat.data = nullptr;
}
}
void copy_Dev_mat(cuASR_helper::Dev_mat& lhs, const cuASR_helper::Dev_mat& rhs) {
cuda_matrix_deleter(lhs);
lhs.rows = rhs.rows;
lhs.cols = rhs.cols;
lhs.bytes_size = rhs.bytes_size;
hipMalloc((void**)&(lhs.data), lhs.bytes_size);
check_for_cuda_error();
hipMemcpy((void*)lhs.data, (const void*)rhs.data, lhs.bytes_size, hipMemcpyDeviceToDevice);
check_for_cuda_error();
}
void move_Dev_mat(cuASR_helper::Dev_mat& lhs, cuASR_helper::Dev_mat&& rhs) {
cuda_matrix_deleter(lhs);
lhs.rows = rhs.rows;
lhs.cols = rhs.cols;
lhs.bytes_size = rhs.bytes_size;
lhs.data = rhs.data;
rhs.data = nullptr;
}
} // namespace
namespace cuASR_helper {
using AdditionOp = cuasr::minimum<float>;
using MultiplicationOp = cuasr::plus<float>;
using RowMajor = cutlass::layout::RowMajor;
using cuASR_MinPlus_SGEMM =
cuasr::gemm::device::Srgemm<AdditionOp, MultiplicationOp, HMM::Mod_prob_t, RowMajor,
HMM::Mod_prob_t, RowMajor, HMM::Mod_prob_t, RowMajor,
HMM::Mod_prob_t>;
Dev_mat::Dev_mat(int rows, int cols)
: rows(rows), cols(cols), bytes_size(rows * cols * sizeof(HMM::Mod_prob_t)) {
hipMalloc((void**)&data, bytes_size);
check_for_cuda_error();
}
Dev_mat::Dev_mat(const Dev_mat& rhs) : data(nullptr) { copy_Dev_mat(*this, rhs); }
Dev_mat& Dev_mat::operator=(const Dev_mat& rhs) {
copy_Dev_mat(*this, rhs);
return *this;
}
Dev_mat::Dev_mat(Dev_mat&& rhs) : data(nullptr) { move_Dev_mat(*this, std::move(rhs)); }
Dev_mat& Dev_mat::operator=(Dev_mat&& rhs) {
move_Dev_mat(*this, std::move(rhs));
return *this;
}
Dev_mat::~Dev_mat() { cuda_matrix_deleter(*this); }
void validate_Dev_mat_ptr([[maybe_unused]] const Dev_mat& mat,
[[maybe_unused]] const std::string& msg) {
#ifndef NDEBUG
auto attr = hipPointerAttribute_t();
hipPointerGetAttributes(&attr, (const void*)mat.data);
if (attr.memoryType != hipMemoryTypeDevice) {
std::cout << "Not a device pointer " << msg << ", is host/unregistered? "
<< (attr.memoryType == hipMemoryTypeHost) << ' '
<< (attr.memoryType == hipMemoryTypeUnregistered) << '\n';
}
#endif
}
void min_plus_Dev_mat_multiply(const Dev_mat& lhs, const Dev_mat& rhs, Dev_mat& res) {
cuda_matrix_deleter(res);
res = Dev_mat(lhs.rows, rhs.cols);
validate_Dev_mat_ptr(res, "res");
#ifndef NDEBUG
if (lhs.cols != rhs.rows) {
std::cerr << "cuASR: lhs and rhs cols/rows mismatch! "
<< "Lhs.cols is " << lhs.cols << ". "
<< "Rhs.cols is " << rhs.rows << '\n';
}
if (lhs.rows != res.rows) {
std::cerr << "cuASR: lhs and res rows mismatch! "
<< "Lhs.rows is " << lhs.rows << ". "
<< "Res.rows is " << res.rows << '\n';
}
if (rhs.cols != res.cols) {
std::cerr << "cuASR: rhs and res cols mismatch! "
<< "Rhs.cols is " << rhs.cols << ". "
<< "Res.cols is " << res.cols << '\n';
}
#endif
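// Argument layout used below: the GEMM problem size {M, N, K} =
// {res.rows, res.cols, lhs.cols}, followed by (pointer, leading-dimension)
// pairs for A, B, C and D. All operands are row major, so the leading
// dimension is the column count, and C and D both alias res so the product is
// written in place. The final pair supplies the semiring constants
// (identity/annihilator) that roughly play the role of alpha/beta in an
// ordinary GEMM.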
auto args = cuASR_MinPlus_SGEMM::Arguments(
{res.rows, res.cols, lhs.cols}, {lhs.data, lhs.cols}, {rhs.data, rhs.cols},
{res.data, res.cols}, {res.data, res.cols},
{MultiplicationOp::Identity, MultiplicationOp::Annihilator});
auto minplus_gemm = cuASR_MinPlus_SGEMM();
auto status = minplus_gemm(args, nullptr, nullptr);
hipDeviceSynchronize();
check_for_cuda_error();
if ((int)status) {
std::cerr << "Matrix multiply error code " << (int)status << '\n'
<< cutlassGetStatusString(status) << '\n';
}
}
HMM::Mod_prob_vec_t Dev_mat_to_Prob_vec(const Dev_mat& mat) {
#ifndef NDEBUG
if (mat.cols != 1) {
std::cerr << "Error! cuASR Dev_mat is not a column!\n";
}
#endif
auto host_data = new HMM::Mod_prob_t[mat.rows * mat.cols];
hipMemcpy((void*)host_data, (const void*)mat.data, mat.bytes_size, hipMemcpyDeviceToHost);
check_for_cuda_error();
auto res = HMM::Mod_prob_vec_t(host_data, host_data + mat.rows * mat.cols);
std::replace_if(
res.begin(), res.end(),
[](auto prob) {
return HMM::almost_equal(std::numeric_limits<HMM::Mod_prob_t>::max(), prob);
},
HMM::zero_prob);
delete[] host_data;
return res;
}
void init_matrices_from_HMM(const HMM& hmm, Dev_mat& start_pr, Dev_mat& transp_tr,
std::vector<Dev_mat>& emit_mat_vec) {
// Column for start probs
auto start_host_ptr = new HMM::Mod_prob_t[hmm.states_num];
set_to_zero_prob(start_host_ptr, hmm.states_num);
for (size_t i = 0; i < hmm.non_zero_start_probs; ++i) {
start_host_ptr[hmm.start_probabilities_cols[i]] = hmm.start_probabilities[i];
}
// Row major transposed transition matrix
auto transp_tr_host_ptr = new HMM::Mod_prob_t[hmm.states_num * hmm.states_num];
set_to_zero_prob(transp_tr_host_ptr, hmm.states_num * hmm.states_num);
for (size_t i = 0; i < hmm.trans_num; ++i) {
auto row = hmm.trans_cols[i];
auto col = hmm.trans_rows[i];
auto val = hmm.trans_probs[i];
transp_tr_host_ptr[row * hmm.states_num + col] = val;
}
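// The transition matrix is stored transposed (row index taken from the
// destination state, column index from the source state), presumably so that
// a single min-plus product transp_tr * state_column advances the DP
// recursion by one observation step.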
// Diagonal matrices
auto emit_mat_vec_host = std::vector<HMM::Mod_prob_t*>(hmm.emit_num);
for (size_t i = 0; i < hmm.emit_num; ++i) {
auto& m = emit_mat_vec_host[i];
m = new HMM::Mod_prob_t[hmm.states_num * hmm.states_num];
set_to_zero_prob(m, hmm.states_num * hmm.states_num);
for (size_t j = 0; j < hmm.states_num; ++j) {
m[j * hmm.states_num + j] = hmm.emissions[i][j];
}
}
start_pr = Dev_mat((int)hmm.states_num, 1);
transp_tr = Dev_mat((int)hmm.states_num, (int)hmm.states_num);
emit_mat_vec = std::vector<Dev_mat>(hmm.emit_num);
for (size_t i = 0; i < hmm.emit_num; ++i) {
emit_mat_vec[i] = Dev_mat((int)hmm.states_num, (int)hmm.states_num);
}
// Transfer data to device
hipMemcpy((void*)start_pr.data, (const void*)start_host_ptr, start_pr.bytes_size,
hipMemcpyHostToDevice);
check_for_cuda_error();
hipMemcpy((void*)transp_tr.data, (const void*)transp_tr_host_ptr, transp_tr.bytes_size,
hipMemcpyHostToDevice);
check_for_cuda_error();
for (size_t i = 0; i < hmm.emit_num; ++i) {
hipMemcpy((void*)emit_mat_vec[i].data, (const void*)emit_mat_vec_host[i],
emit_mat_vec[i].bytes_size, hipMemcpyHostToDevice);
check_for_cuda_error();
}
// Free host memory
delete[] start_host_ptr;
delete[] transp_tr_host_ptr;
for (auto& m : emit_mat_vec_host) {
delete[] m;
}
}
} // namespace cuASR_helper
| a468a52512920945c08e7f6bfd2e9722e77af7bb.cu | #include "cuASR_helper.h"
#include "cuasr/functional.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <experimental/source_location>
#include <iostream>
namespace {
void check_for_cuda_error([[maybe_unused]] std::experimental::source_location s =
std::experimental::source_location::current()) {
#ifndef NDEBUG
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cerr << "CUDA Error: " << cudaGetErrorString(err) << '\n';
std::cerr << " file: " << s.file_name() << '\n'
<< " function: " << s.function_name() << '\n'
<< " line: " << s.line() << "\n\n";
std::exit(1);
}
#endif
}
void set_to_zero_prob(HMM::Mod_prob_t* data, size_t how_much) {
for (size_t i = 0; i < how_much; ++i) {
data[i] = HMM::zero_prob;
}
}
void cuda_matrix_deleter(cuASR_helper::Dev_mat& mat) {
if (mat.data != nullptr) {
cudaFree(static_cast<void*>(mat.data));
check_for_cuda_error();
mat.data = nullptr;
}
}
void copy_Dev_mat(cuASR_helper::Dev_mat& lhs, const cuASR_helper::Dev_mat& rhs) {
cuda_matrix_deleter(lhs);
lhs.rows = rhs.rows;
lhs.cols = rhs.cols;
lhs.bytes_size = rhs.bytes_size;
cudaMalloc((void**)&(lhs.data), lhs.bytes_size);
check_for_cuda_error();
cudaMemcpy((void*)lhs.data, (const void*)rhs.data, lhs.bytes_size, cudaMemcpyDeviceToDevice);
check_for_cuda_error();
}
void move_Dev_mat(cuASR_helper::Dev_mat& lhs, cuASR_helper::Dev_mat&& rhs) {
cuda_matrix_deleter(lhs);
lhs.rows = rhs.rows;
lhs.cols = rhs.cols;
lhs.bytes_size = rhs.bytes_size;
lhs.data = rhs.data;
rhs.data = nullptr;
}
} // namespace
namespace cuASR_helper {
using AdditionOp = cuasr::minimum<float>;
using MultiplicationOp = cuasr::plus<float>;
using RowMajor = cutlass::layout::RowMajor;
using cuASR_MinPlus_SGEMM =
cuasr::gemm::device::Srgemm<AdditionOp, MultiplicationOp, HMM::Mod_prob_t, RowMajor,
HMM::Mod_prob_t, RowMajor, HMM::Mod_prob_t, RowMajor,
HMM::Mod_prob_t>;
Dev_mat::Dev_mat(int rows, int cols)
: rows(rows), cols(cols), bytes_size(rows * cols * sizeof(HMM::Mod_prob_t)) {
cudaMalloc((void**)&data, bytes_size);
check_for_cuda_error();
}
Dev_mat::Dev_mat(const Dev_mat& rhs) : data(nullptr) { copy_Dev_mat(*this, rhs); }
Dev_mat& Dev_mat::operator=(const Dev_mat& rhs) {
copy_Dev_mat(*this, rhs);
return *this;
}
Dev_mat::Dev_mat(Dev_mat&& rhs) : data(nullptr) { move_Dev_mat(*this, std::move(rhs)); }
Dev_mat& Dev_mat::operator=(Dev_mat&& rhs) {
move_Dev_mat(*this, std::move(rhs));
return *this;
}
Dev_mat::~Dev_mat() { cuda_matrix_deleter(*this); }
void validate_Dev_mat_ptr([[maybe_unused]] const Dev_mat& mat,
[[maybe_unused]] const std::string& msg) {
#ifndef NDEBUG
auto attr = cudaPointerAttributes();
cudaPointerGetAttributes(&attr, (const void*)mat.data);
if (attr.memoryType != cudaMemoryTypeDevice) {
std::cout << "Not a device pointer " << msg << ", is host/unregistered? "
<< (attr.memoryType == cudaMemoryTypeHost) << ' '
<< (attr.memoryType == cudaMemoryTypeUnregistered) << '\n';
}
#endif
}
void min_plus_Dev_mat_multiply(const Dev_mat& lhs, const Dev_mat& rhs, Dev_mat& res) {
cuda_matrix_deleter(res);
res = Dev_mat(lhs.rows, rhs.cols);
validate_Dev_mat_ptr(res, "res");
#ifndef NDEBUG
if (lhs.cols != rhs.rows) {
std::cerr << "cuASR: lhs and rhs cols/rows mismatch! "
<< "Lhs.cols is " << lhs.cols << ". "
<< "Rhs.cols is " << rhs.rows << '\n';
}
if (lhs.rows != res.rows) {
std::cerr << "cuASR: lhs and res rows mismatch! "
<< "Lhs.rows is " << lhs.rows << ". "
<< "Res.rows is " << res.rows << '\n';
}
if (rhs.cols != res.cols) {
std::cerr << "cuASR: rhs and res cols mismatch! "
<< "Rhs.cols is " << rhs.cols << ". "
<< "Res.cols is " << res.cols << '\n';
}
#endif
auto args = cuASR_MinPlus_SGEMM::Arguments(
{res.rows, res.cols, lhs.cols}, {lhs.data, lhs.cols}, {rhs.data, rhs.cols},
{res.data, res.cols}, {res.data, res.cols},
{MultiplicationOp::Identity, MultiplicationOp::Annihilator});
auto minplus_gemm = cuASR_MinPlus_SGEMM();
auto status = minplus_gemm(args, nullptr, nullptr);
cudaDeviceSynchronize();
check_for_cuda_error();
if ((int)status) {
std::cerr << "Matrix multiply error code " << (int)status << '\n'
<< cutlassGetStatusString(status) << '\n';
}
}
HMM::Mod_prob_vec_t Dev_mat_to_Prob_vec(const Dev_mat& mat) {
#ifndef NDEBUG
if (mat.cols != 1) {
std::cerr << "Error! cuASR Dev_mat is not a column!\n";
}
#endif
auto host_data = new HMM::Mod_prob_t[mat.rows * mat.cols];
cudaMemcpy((void*)host_data, (const void*)mat.data, mat.bytes_size, cudaMemcpyDeviceToHost);
check_for_cuda_error();
auto res = HMM::Mod_prob_vec_t(host_data, host_data + mat.rows * mat.cols);
std::replace_if(
res.begin(), res.end(),
[](auto prob) {
return HMM::almost_equal(std::numeric_limits<HMM::Mod_prob_t>::max(), prob);
},
HMM::zero_prob);
delete[] host_data;
return res;
}
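// Entries still equal to the largest representable float act as the
// "unreachable"/zero-probability sentinel on the device; they are mapped back
// to HMM::zero_prob here so callers only ever see the library's own encoding.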
void init_matrices_from_HMM(const HMM& hmm, Dev_mat& start_pr, Dev_mat& transp_tr,
std::vector<Dev_mat>& emit_mat_vec) {
// Column for start probs
auto start_host_ptr = new HMM::Mod_prob_t[hmm.states_num];
set_to_zero_prob(start_host_ptr, hmm.states_num);
for (size_t i = 0; i < hmm.non_zero_start_probs; ++i) {
start_host_ptr[hmm.start_probabilities_cols[i]] = hmm.start_probabilities[i];
}
// Row major transposed transition matrix
auto transp_tr_host_ptr = new HMM::Mod_prob_t[hmm.states_num * hmm.states_num];
set_to_zero_prob(transp_tr_host_ptr, hmm.states_num * hmm.states_num);
for (size_t i = 0; i < hmm.trans_num; ++i) {
auto row = hmm.trans_cols[i];
auto col = hmm.trans_rows[i];
auto val = hmm.trans_probs[i];
transp_tr_host_ptr[row * hmm.states_num + col] = val;
}
// Diagonal matrices
auto emit_mat_vec_host = std::vector<HMM::Mod_prob_t*>(hmm.emit_num);
for (size_t i = 0; i < hmm.emit_num; ++i) {
auto& m = emit_mat_vec_host[i];
m = new HMM::Mod_prob_t[hmm.states_num * hmm.states_num];
set_to_zero_prob(m, hmm.states_num * hmm.states_num);
for (size_t j = 0; j < hmm.states_num; ++j) {
m[j * hmm.states_num + j] = hmm.emissions[i][j];
}
}
start_pr = Dev_mat((int)hmm.states_num, 1);
transp_tr = Dev_mat((int)hmm.states_num, (int)hmm.states_num);
emit_mat_vec = std::vector<Dev_mat>(hmm.emit_num);
for (size_t i = 0; i < hmm.emit_num; ++i) {
emit_mat_vec[i] = Dev_mat((int)hmm.states_num, (int)hmm.states_num);
}
// Transfer data to device
cudaMemcpy((void*)start_pr.data, (const void*)start_host_ptr, start_pr.bytes_size,
cudaMemcpyHostToDevice);
check_for_cuda_error();
cudaMemcpy((void*)transp_tr.data, (const void*)transp_tr_host_ptr, transp_tr.bytes_size,
cudaMemcpyHostToDevice);
check_for_cuda_error();
for (size_t i = 0; i < hmm.emit_num; ++i) {
cudaMemcpy((void*)emit_mat_vec[i].data, (const void*)emit_mat_vec_host[i],
emit_mat_vec[i].bytes_size, cudaMemcpyHostToDevice);
check_for_cuda_error();
}
// Free host memory
delete[] start_host_ptr;
delete[] transp_tr_host_ptr;
for (auto& m : emit_mat_vec_host) {
delete[] m;
}
}
} // namespace cuASR_helper
|
53261259cf7309e8235f72bbea6766bdba3f4f8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*_________________________________________________________________________
* ww2parCC_device_066DP.cu - calculates the self-induced velocity field of the wake.
* Parallel version on GPU - CUDA code executed on device
*
* CUDA kernel function (executed on the device, called from the host) +
* CUDA block & thread functions (executed on the device, called from device)
* Manages data flow, launches and synchronizes thread blocks
*
* DUWIND- Delft University Wind Energy Research Institute
* developer: Giuseppe Tescione
*
* Version: 0.6.6DP (alpha) - 20110824
* basic version with no loop unrolling, no wrap and no multithread bodies
* simple cut-off constant for desingularization
* double precision (for GPUs of computing capability 2.x)
*________________________________________________________________________*/
//Definition of double2 and double3 types
//typedef struct {
//double x, y;
//} double2;
//typedef struct {
//double x, y, z;
//} double3;
__constant__ int blocksize_gpu;
__constant__ int nParticles_gpu;
__constant__ int nTargets_gpu;
__constant__ int nParticleBlocks_gpu;
__constant__ int nTargetBlocks_gpu;
__constant__ double ksigmasqr_gpu;
__constant__ double inv_pi_gpu;
__constant__ double myeps_gpu;
/* constants (block dimension, number of particle, cut-off and 1/2pi) residing in
constant memory space, accessible from all threads within the grid and from the host.
Defined in host code*/
__device__ double ww2par_thread(double THR_vorticity, double THR_xTarget, double THR_yTarget, double THR_xBlob, double THR_yBlob, double THR_wBlob)
/*THREAD FUNCTION - set of instructions performed in parallel by each processor.
Calculates velocity induction on target particles by source particle.
Takes as input:
THR_UIND (2x double THR_UIND.x & THR_UIND.y) -> velocity induction of target particles
already computed by previous thread blocks to which adds the new induction;
THR_TARG (3x double THR_TARG.x & THR_TARG.y & THR_TARG.z) -> position (x, y) and
vorticity (z) of target particles.Position is needed to calculate induction
but vorticity is not used; it is kept to maintain data structure coherency;
THR_SRC (3x double THR_SRC.x & THR_SRC.y & THR_SRC.z) -> position (x, y) and
vorticity (z) of source particles, needed to calculate induction.
Gives as output:
THR_UIND (2x double THR_UIND.x & THR_UIND.y) -> updated velocity induction of targets */
{
//targets-particle distance, local variable [2 FLOPS]
// printf("Thread %d; xTarget %f; yTarget %f; xBlob %f; yBlob %f, wBlob %f\n -----------------------------------------\n",threadIdx.x,THR_xTarget,THR_yTarget,THR_xBlob,THR_yBlob,THR_wBlob);
double2 RAD;
RAD.x = THR_xTarget - THR_xBlob;
RAD.y = THR_yTarget - THR_yBlob;
//square of distance plus cut-off, local variable [4 FLOPS]
double RADSQR = RAD.x * RAD.x + RAD.y * RAD.y + myeps_gpu;
//vorticity/(2pi*sqr(rad)) [2 FLOPS]
double S = THR_wBlob * inv_pi_gpu / ksigmasqr_gpu;
//update velocity induction [4 FLOPS]
THR_vorticity += S * exp(-RADSQR/(ksigmasqr_gpu));
return THR_vorticity;
}
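/* In effect each source blob j adds w_j * inv_pi / (k*sigma^2) * exp(-r^2 / (k*sigma^2))
to the target's vorticity, with myeps_gpu added to r^2 as a cut-off so that
coincident target/source points do not produce a singular contribution. */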
__device__ double ww2par_block(double BLK_xTarget, double BLK_yTarget, double BLK_vorticity)
/*BLOCK FUNCTION - data & execution management for thread block
Evaluate induction in a pxp block
Takes as input:
BLK_TARG (3x double BLK_TARG.x & BLK_TARG.y & BLK_TARG.z) -> position (x, y)
and vorticity (z) of target. Passed unchanged to THREAD CODE as TARGET;
BLK_UIND (2x double BLK_UIND.x & BLK_UIND.y) -> velocity induction of target
particles. Passed unchanged to THREAD CODE as UIND.
Gives as output:
BLK_UIND (2x double BLK_UIND.x & BLK_UIND.y) -> updated velocity induction
of target. Received unchanged by THREAD CODE as UIND */
{
extern __shared__ double BLK_blob [];
//extern __shared__ double BLK_yBlob [];
//extern __shared__ double BLK_wBlob [];
/* External variable residing in shared memory space of thread block,
accessible from all threads within the block. Source particles data array
(position (x & y) and vorticity (z)) common to the block.
Size of the array is determined at launch time with instruction [] */
//call thread function for every thread in block
for ( int i = 0; i <blockDim.x; i++)
{
BLK_vorticity = ww2par_thread(BLK_vorticity, BLK_xTarget, BLK_yTarget, BLK_blob[i], BLK_blob[i+blocksize_gpu], BLK_blob[i+2*blocksize_gpu]);
}
return BLK_vorticity;
}
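/* Shared-memory layout: BLK_blob packs the current tile of source blobs as three
contiguous slabs - x in [0, blocksize), y in [blocksize, 2*blocksize) and
w in [2*blocksize, 3*blocksize) - so the dynamic shared-memory size passed at
launch is expected to be at least 3 * blocksize * sizeof(double). */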
__global__ void ww2par_kernel(void *cxBlob_gpu_ondevice, void *cyBlob_gpu_ondevice, void *cwBlob_gpu_ondevice, void *cxTarget_gpu_ondevice, void *cyTarget_gpu_ondevice, void *cw_gpu_ondevice)
/*KERNEL FUNCTION - data & execution management for block grid
Kernel executed on the device, called from the host.
Manages memory passages from host to device and executes block function
Takes as input:
*ONDEV_POS and *ONDEV_IND -> pointers to global device memory for the
position and induction of particles */
{
extern __shared__ double BLK_blob []; //see above
//extern __shared__ double BLK_yBlob []; //see above
//extern __shared__ double BLK_wBlob []; //see above
//pointers passage
double * KRN_xBlob = (double *)cxBlob_gpu_ondevice;
double * KRN_yBlob = (double *)cyBlob_gpu_ondevice;
double * KRN_wBlob = (double *)cwBlob_gpu_ondevice;
double * KRN_xTarget = (double *)cxTarget_gpu_ondevice;
double * KRN_yTarget = (double *)cyTarget_gpu_ondevice;
double * KRN_w = (double *)cw_gpu_ondevice;
//induction initialization
double BLK_vorticity;
BLK_vorticity = 0;
//target particles definition
double BLK_xTarget;
double BLK_yTarget;
int NTHR = blockIdx.x * blockDim.x + threadIdx.x;
BLK_xTarget = KRN_xTarget[NTHR];
BLK_yTarget = KRN_yTarget[NTHR];
//printf("Block %d; Thread %d :: Before the loop\n",blockIdx.x,threadIdx.x);
int i, block;
for (i = 0, block = 0; i < nParticles_gpu; i += blocksize_gpu, block++)//LOOP over blocks
{
//source particle definition (shared data)
int id = block * blockDim.x + threadIdx.x;
BLK_blob [threadIdx.x] = KRN_xBlob[id];
BLK_blob [threadIdx.x + blocksize_gpu] = KRN_yBlob[id];
BLK_blob [threadIdx.x + 2*blocksize_gpu] = KRN_wBlob[id];
__syncthreads();
// all shared memory locations are populated before starting computation
BLK_vorticity = ww2par_block(BLK_xTarget, BLK_yTarget, BLK_vorticity); //block function call
__syncthreads();
//all threads within block finish computation before advancing next block
}
//save results in global memory
double WIND = BLK_vorticity;
KRN_w[NTHR] = WIND;
}
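/* Host-side launch sketch (illustrative only - the real host code lives elsewhere,
and the names used here, blockSize/nBlocks/d*, are assumptions):
int blockSize = 128; // must equal the blocksize_gpu constant
int nBlocks = (nTargets + blockSize - 1) / blockSize;
size_t shBytes = 3 * blockSize * sizeof(double); // x, y, w slabs of BLK_blob
hipLaunchKernelGGL(ww2par_kernel, dim3(nBlocks), dim3(blockSize), shBytes, 0,
dxBlob, dyBlob, dwBlob, dxTarget, dyTarget, dw);
*/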
| 53261259cf7309e8235f72bbea6766bdba3f4f8c.cu | /*_________________________________________________________________________
* ww2parCC_device_066DP.cu - calculates the self-induced velocity field of the wake.
* Parallel version on GPU - CUDA code executed on device
*
* CUDA kernel function (executed on the device, called from the host) +
* CUDA block & thread functions (executed on the device, called from device)
* Manages data flow, launches and syncronize threads blocks
*
* DUWIND- Delft University Wind Energy Research Institute
* developer: Giuseppe Tescione
*
* Version: 0.6.6DP (alpha) - 20110824
* basic version with no loop unrolling, no wrap and no multithread bodies
* simple cut-off constant for desingularization
* double precision (for GPUs of computing capability 2.x)
*________________________________________________________________________*/
//Definition of double2 and double3 types
//typedef struct {
//double x, y;
//} double2;
//typedef struct {
//double x, y, z;
//} double3;
__constant__ int blocksize_gpu;
__constant__ int nParticles_gpu;
__constant__ int nTargets_gpu;
__constant__ int nParticleBlocks_gpu;
__constant__ int nTargetBlocks_gpu;
__constant__ double ksigmasqr_gpu;
__constant__ double inv_pi_gpu;
__constant__ double myeps_gpu;
/* constants (block dimension, number of particle, cut-off and 1/2pi) residing in
constant memory space, accessible from all threads within the grid and from the host.
Defined in host code*/
__device__ double ww2par_thread(double THR_vorticity, double THR_xTarget, double THR_yTarget, double THR_xBlob, double THR_yBlob, double THR_wBlob)
/*THREAD FUNCTION - set of instructions performed in parallel by each processor.
Calculates velocity induction on target particles by source particle.
Takes as input:
THR_UIND (2x double THR_UIND.x & THR_UIND.y) -> velocity induction of target particles
already computed by previous thread blocks to which adds the new induction;
THR_TARG (3x double THR_TARG.x & THR_TARG.y & THR_TARG.z) -> position (x, y) and
vorticity (z) of target particles.Position is needed to calculate induction
but vorticity is not used; it is kept to maintain data structure coherency;
THR_SRC (3x double THR_SRC.x & THR_SRC.y & THR_SRC.z) -> position (x, y) and
vorticity (z) of source particles, needed to calculate induction.
Gives as output:
THR_UIND (2x double THR_UIND.x & THR_UIND.y) -> updated velocity induction of targets */
{
//targets-particle distance, local variable [2 FLOPS]
// printf("Thread %d; xTarget %f; yTarget %f; xBlob %f; yBlob %f, wBlob %f\n -----------------------------------------\n",threadIdx.x,THR_xTarget,THR_yTarget,THR_xBlob,THR_yBlob,THR_wBlob);
double2 RAD;
RAD.x = THR_xTarget - THR_xBlob;
RAD.y = THR_yTarget - THR_yBlob;
//square of distance plus cut-off, local variable [4 FLOPS]
double RADSQR = RAD.x * RAD.x + RAD.y * RAD.y + myeps_gpu;
//vorticity/(2pi*sqr(rad)) [2 FLOPS]
double S = THR_wBlob * inv_pi_gpu / ksigmasqr_gpu;
//update velocity induction [4 FLOPS]
THR_vorticity += S * exp(-RADSQR/(ksigmasqr_gpu));
return THR_vorticity;
}
__device__ double ww2par_block(double BLK_xTarget, double BLK_yTarget, double BLK_vorticity)
/*BLOCK FUNCTION - data & execution management for thread block
Evaluate induction in a pxp block
Takes as input:
BLK_TARG (3x double BLK_TARG.x & BLK_TARG.y & BLK_TARG.z) -> position (x, y)
and vorticity (z) of target. Passed unchanged to THREAD CODE as TARGET;
BLK_UIND (2x double BLK_UIND.x & BLK_UIND.y) -> velocity induction of target
particles. Passed unchanged to THREAD CODE as UIND.
Gives as output:
BLK_UIND (2x double BLK_UIND.x & BLK_UIND.y) -> updated velocity induction
of target. Received unchanged by THREAD CODE as UIND */
{
extern __shared__ double BLK_blob [];
//extern __shared__ double BLK_yBlob [];
//extern __shared__ double BLK_wBlob [];
/* External variable residing in shared memory space of thread block,
accessible from all threads within the block. Source particles data array
(position (x & y) and vorticity (z)) common to the block.
Size of the array is determined at launch time with instruction [] */
//call thread function for every thread in block
for ( int i = 0; i <blockDim.x; i++)
{
BLK_vorticity = ww2par_thread(BLK_vorticity, BLK_xTarget, BLK_yTarget, BLK_blob[i], BLK_blob[i+blocksize_gpu], BLK_blob[i+2*blocksize_gpu]);
}
return BLK_vorticity;
}
__global__ void ww2par_kernel(void *cxBlob_gpu_ondevice, void *cyBlob_gpu_ondevice, void *cwBlob_gpu_ondevice, void *cxTarget_gpu_ondevice, void *cyTarget_gpu_ondevice, void *cw_gpu_ondevice)
/*KERNEL FUNCTION - data & execution management for block grid
Kernel executed on the device, called from the host.
Manages memory passages from host to device and executes block function
Takes as input:
*ONDEV_POS and *ONDEV_IND -> pointers to global device memory for the
position and induction of particles */
{
extern __shared__ double BLK_blob []; //see above
//extern __shared__ double BLK_yBlob []; //see above
//extern __shared__ double BLK_wBlob []; //see above
//pointers passage
double * KRN_xBlob = (double *)cxBlob_gpu_ondevice;
double * KRN_yBlob = (double *)cyBlob_gpu_ondevice;
double * KRN_wBlob = (double *)cwBlob_gpu_ondevice;
double * KRN_xTarget = (double *)cxTarget_gpu_ondevice;
double * KRN_yTarget = (double *)cyTarget_gpu_ondevice;
double * KRN_w = (double *)cw_gpu_ondevice;
//induction initialization
double BLK_vorticity;
BLK_vorticity = 0;
//target particles definition
double BLK_xTarget;
double BLK_yTarget;
int NTHR = blockIdx.x * blockDim.x + threadIdx.x;
BLK_xTarget = KRN_xTarget[NTHR];
BLK_yTarget = KRN_yTarget[NTHR];
//printf("Block %d; Thread %d :: Before the loop\n",blockIdx.x,threadIdx.x);
int i, block;
for (i = 0, block = 0; i < nParticles_gpu; i += blocksize_gpu, block++)//LOOP over blocks
{
//source particle definition (shared data)
int id = block * blockDim.x + threadIdx.x;
BLK_blob [threadIdx.x] = KRN_xBlob[id];
BLK_blob [threadIdx.x + blocksize_gpu] = KRN_yBlob[id];
BLK_blob [threadIdx.x + 2*blocksize_gpu] = KRN_wBlob[id];
__syncthreads();
// all shared memory locations are populated before starting computation
BLK_vorticity = ww2par_block(BLK_xTarget, BLK_yTarget, BLK_vorticity); //block function call
__syncthreads();
//all threads within block finish computation before advancing next block
}
//save results in global memory
double WIND = BLK_vorticity;
KRN_w[NTHR] = WIND;
}
|
a47302d3cabf722b865d86d12f6acdfb8e6ab535.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_logploss.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *dy = NULL;
hipMalloc(&dy, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
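// The two while loops above round XSIZE/YSIZE up to multiples of the block
// dimensions so the grid covers the whole matrix. Below, one synchronized
// launch plus ten further launches serve as warm-up, after which 1000 launches
// are timed with steady_clock and the total is printed in microseconds next to
// the block and matrix shapes.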
hipFree(0);
hipLaunchKernelGGL(_logploss, dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, ncols, y, dy);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(_logploss, dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, ncols, y, dy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(_logploss, dim3(gridBlock), dim3(threadBlock), 0, 0, nrows, ncols, y, dy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a47302d3cabf722b865d86d12f6acdfb8e6ab535.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_logploss.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
float *dy = NULL;
cudaMalloc(&dy, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_logploss<<<gridBlock,threadBlock>>>(nrows,ncols,y,dy);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_logploss<<<gridBlock,threadBlock>>>(nrows,ncols,y,dy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_logploss<<<gridBlock,threadBlock>>>(nrows,ncols,y,dy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9ab10edd91050ea08e860b3f83483f5d4eef5410.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include "stdlib.h"
#include "utils.h"
#include "stdlib.h"
#include "cuda_error_check.cuh"
#include "initial_graph.cuh"
#include "parse_graph.cuh"
#define SSSP_INF 1073741824
int compare(const void *e1, const void *e2)
{
const SdwEdge *edge1 = (SdwEdge *)e1;
const SdwEdge *edge2 = (SdwEdge *)e2;
if (edge1->src <= edge2->src)
//if (edge1->dst <= edge2->dst)
{
return -1;
}
return 1;
}
bool disChange(unsigned int *dp, unsigned int *dc, int len)
{
int i;
for (i = 0; i < len; i++)
{
if (dp[i] != dc[i])
{
return true;
}
}
return false;
}
__global__ void pulling_kernel(SdwEdge *gpuElist, int listlen, unsigned int *gpuDisCur, int *gpuChange, int spanSize, int blockNum, int workPerwarp,int warpNum){
int blockSize = blockDim.x;
int threadidKernal = blockIdx.x * blockSize + threadIdx.x;
//printf("warp num %d span size %d\n", warpNum,spanSize);
int warpId= threadidKernal / spanSize;
int laneId = threadidKernal % spanSize;
//int beg = gbase * spanSize + threadidKernal;
int beg=workPerwarp*warpId+laneId;
int end = min(listlen,beg+workPerwarp);
int src,dst,weight,tempDist,tmpOld;
int i;
//printf("tid %d workload %d warpid %d beg %d end %d\n",threadidKernal,workPerwarp,warpId,beg,end);
for (i = beg; i < end; i += spanSize)
{
src = gpuElist[i].src;
dst = gpuElist[i].dst;
weight = gpuElist[i].weight;
tempDist = gpuDisCur[src] + weight;
//if(src==0 && i<32){
// printf("index i %d src %d dst %d weight %d gpuDisCur[dst] %d\n",i,src,dst,weight,gpuDisCur[dst]);
//}
if (tempDist < gpuDisCur[dst])
{
tmpOld = gpuDisCur[dst];
atomicMin(&gpuDisCur[dst], tempDist);
if (tmpOld != gpuDisCur[dst])
{
atomicExch(gpuChange, 1);
//printf("dst %d old %d new %d\n",dst,tmpOld,gpuDisCur[dst]);
}
}
}
}
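// Work distribution: the edge list is split into contiguous chunks of
// workPerwarp edges per warp; within its chunk each lane starts at its lane id
// and strides by spanSize (the warp width). A relaxation is committed with
// atomicMin on the destination's distance, and *gpuChange is raised whenever a
// distance actually shrinks, which the host uses as the convergence flag.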
void pullerSortBySrc(std::vector<initial_vertex> *peeps, int blockSize, int blockNum)
{
if (blockSize % 32 != 0)
{
printf("blockSize should be the multiple of 32\n");
exit(1);
}
printf("start puller, sorted by src\n");
setTime();
//Do all the things here!
int i, j;
int nbLen;
//input parameter is an inverse adjacency list; convert it into a flat edge list
int vertexNum = peeps->size();
int edgeNum = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
edgeNum = edgeNum + nbLen;
}
//printf("vertex num %d edge number %d\n", vertexNum, edgeNum);
//std::vector<SdwEdge*> edgeList;
SdwEdge *edgeList = (SdwEdge *)malloc(sizeof(SdwEdge) * edgeNum);
if (edgeList == NULL)
{
printf("malloc fail");
exit(1);
}
int edgeIndex = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
for (j = 0; j < nbLen; j++)
{
edgeList[edgeIndex].dst = i;
edgeList[edgeIndex].src = (*peeps)[i].nbrs[j].srcIndex;
edgeList[edgeIndex].weight = (*peeps)[i].nbrs[j].edgeValue.weight;
edgeIndex++;
}
}
//sort
qsort(edgeList, edgeNum, sizeof(SdwEdge), compare);
//check after sorting
//for (i = 0; i < edgeNum; i++)
//{
// printf("src (%d) dst (%d) wieght (%d)\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight);
//}
unsigned int *DisCur = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
unsigned int *newDisCur = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
DisCur[0] = 0;
for (i = 1; i < vertexNum; i++)
{
DisCur[i] = SSSP_INF;
}
DisCur[edgeList[0].src]=0;
//check init dist
//for (i = 0; i < vertexNum; i++)
//{
// printf("index %d dist %d\n", i, finalDist[i]);
//}
//init the parameters on GPU
SdwEdge *gpuElist;
unsigned int *gpuDisCur;
hipMallocManaged((void **)&gpuElist, sizeof(SdwEdge) * edgeNum);
hipMemcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum, hipMemcpyHostToDevice);
//memcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum);
hipMallocManaged((void **)&gpuDisCur, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
hipMemcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum, hipMemcpyHostToDevice);
//copy in every iteration
//gpu task
int *gpuBase;
int *gpuChange;
hipMallocManaged((void **)&(gpuBase), sizeof(int));
hipMallocManaged((void **)&(gpuChange), sizeof(int));
int iteIndex = 1;
int change = 0;
int spanSize = 32;
int warpNum=blockNum*blockSize/spanSize;
int workPerwarp;
int reminder;
int extra;
int listlen=edgeNum;
if (listlen % warpNum == 0)
{
workPerwarp = listlen / warpNum;
}
else
{
reminder=listlen % warpNum;
if(reminder % warpNum==0){
extra=reminder/warpNum;
}else{
extra=(reminder/warpNum)+1;
}
workPerwarp = extra+(listlen / warpNum);
}
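// Since reminder = listlen % warpNum is always smaller than warpNum, the branch
// above reduces to workPerwarp = listlen / warpNum + 1 whenever the division is
// inexact, i.e. a ceiling division. Note also that the clock_gettime pair below
// only brackets the asynchronous kernel launch; without a synchronization before
// gpuEnd, the accumulated gpuTotal reflects launch overhead rather than the
// kernels' execution time.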
struct timespec gpuStart, gpuEnd;
double gpuTotal = 0;
for (iteIndex = 0; iteIndex < edgeNum; iteIndex++)
//for (iteIndex = 0; iteIndex < 1000; iteIndex++)
{
//for (iteIndex=0; iteIndex<2; iteIndex++){
//printf("debug point 0\n");
//gpu task
//memset(distFlag,0,sizeof(int)*vertexNum);
change = 0;
//int *change=(int*)malloc(sizeof(int));
//*change=0;
//printf("iteration index %d\n", iteIndex);
hipMemcpy(gpuChange, &change, sizeof(int), hipMemcpyHostToDevice);
//trick gsharemem space array
//1 size for edgeList 1*blockSize
//2 size for distPrev 1*blockSize
//3 size for distCur 1*blockSize
//printf("debug point 1\n");
//int sharSize = blockSize * (sizeof(SdwEdge) + sizeof(unsigned int) * 2);
//int sharSize = blockSize * (sizeof(SdwEdge));
//int sharSize = workPerwarp * (sizeof(SdwEdge));
//TODO shareMem <check if shareMem is larger than limitation>
int sharSize=0;
if (blockNum * blockSize < spanSize)
{
spanSize = blockNum * blockSize;
}
//printf("iteration %d curr base %d\n",i,base);
//pulling_kernel<<<blockNum, blockSize, sharSize>>>(gpuElist, edgeNum, gpuDisCur, base, gpuChange, spanSize,iteIndex);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuStart);
hipLaunchKernelGGL(( pulling_kernel), dim3(blockNum), dim3(blockSize), sharSize, 0, gpuElist, edgeNum, gpuDisCur, gpuChange, spanSize, blockNum, workPerwarp, warpNum);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuEnd);
gpuTotal = gpuTotal + (double)1000 * (gpuEnd.tv_sec - gpuStart.tv_sec) + (double)(gpuEnd.tv_nsec - gpuStart.tv_nsec) / 1000000;
//pulling_kernel_testshar<<<blockNum, blockSize, sharSize>>>(gpuElist, edgeNum, gpuDisCur, base, gpuChange, spanSize);
//printf("debug point 3\n");
//get return value, check if change
hipMemcpy(&change, gpuChange, sizeof(int), hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
//printf("iteration number %d gpuchange %d\n",iteIndex,change);
//for(i=0;i<vertexNum;i++){
// printf("iterIndex %d change %d, final index %d dis %d\n",iteIndex,change,i,DisCur[i]);
//}
//bool ifchange=disChange(DisPre,DisCur,vertexNum);
if (change == 0)
{
printf("iteration no change, ineration num %d\n",iteIndex);
//hipMemcpy(newDisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, hipMemcpyDeviceToHost);
break;
}
}
//check result
//for(i=0;i<vertexNum;i++){
// printf("index %d dis %d\n",i,DisCur[i]);
//}
hipMemcpy(newDisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, hipMemcpyDeviceToHost);
//update the vector
for (i = 0; i < vertexNum; i++)
{
//printf("test value %d\n",newDisCur[i]);
(*peeps)[i].vertexValue.distance = newDisCur[i];
}
hipFree(gpuElist);
hipFree(gpuDisCur);
printf("The computation kernel time on GPU is %f milli-seconds\n",gpuTotal);
std::cout << "The total computation time is " << getTime() << " milli-seconds.\n";
} | 9ab10edd91050ea08e860b3f83483f5d4eef5410.cu | #include <vector>
#include <iostream>
#include "stdlib.h"
#include "utils.h"
#include "stdlib.h"
#include "cuda_error_check.cuh"
#include "initial_graph.cuh"
#include "parse_graph.cuh"
#define SSSP_INF 1073741824
int compare(const void *e1, const void *e2)
{
const SdwEdge *edge1 = (SdwEdge *)e1;
const SdwEdge *edge2 = (SdwEdge *)e2;
if (edge1->src <= edge2->src)
//if (edge1->dst <= edge2->dst)
{
return -1;
}
return 1;
}
bool disChange(unsigned int *dp, unsigned int *dc, int len)
{
int i;
for (i = 0; i < len; i++)
{
if (dp[i] != dc[i])
{
return true;
}
}
return false;
}
__global__ void pulling_kernel(SdwEdge *gpuElist, int listlen, unsigned int *gpuDisCur, int *gpuChange, int spanSize, int blockNum, int workPerwarp,int warpNum){
int blockSize = blockDim.x;
int threadidKernal = blockIdx.x * blockSize + threadIdx.x;
//printf("warp num %d span size %d\n", warpNum,spanSize);
int warpId= threadidKernal / spanSize;
int laneId = threadidKernal % spanSize;
//int beg = gbase * spanSize + threadidKernal;
int beg=workPerwarp*warpId+laneId;
int end = min(listlen,beg+workPerwarp);
int src,dst,weight,tempDist,tmpOld;
int i;
//printf("tid %d workload %d warpid %d beg %d end %d\n",threadidKernal,workPerwarp,warpId,beg,end);
for (i = beg; i < end; i += spanSize)
{
src = gpuElist[i].src;
dst = gpuElist[i].dst;
weight = gpuElist[i].weight;
tempDist = gpuDisCur[src] + weight;
//if(src==0 && i<32){
// printf("index i %d src %d dst %d weight %d gpuDisCur[dst] %d\n",i,src,dst,weight,gpuDisCur[dst]);
//}
if (tempDist < gpuDisCur[dst])
{
tmpOld = gpuDisCur[dst];
atomicMin(&gpuDisCur[dst], tempDist);
if (tmpOld != gpuDisCur[dst])
{
atomicExch(gpuChange, 1);
//printf("dst %d old %d new %d\n",dst,tmpOld,gpuDisCur[dst]);
}
}
}
}
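// The host loop in pullerSortBySrc below drives this kernel in Bellman-Ford
// fashion: each iteration relaxes every edge once, and the loop exits early as
// soon as a full pass leaves all distances unchanged (gpuChange stays 0);
// edgeNum only serves as a safe upper bound on the number of passes.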
void pullerSortBySrc(std::vector<initial_vertex> *peeps, int blockSize, int blockNum)
{
if (blockSize % 32 != 0)
{
printf("blockSize should be the multiple of 32\n");
exit(1);
}
printf("start puller, sorted by src\n");
setTime();
//Do all the things here!
int i, j;
int nbLen;
//input parameter is an inverse adjacency list; convert it into a flat edge list
int vertexNum = peeps->size();
int edgeNum = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
edgeNum = edgeNum + nbLen;
}
//printf("vertex num %d edge number %d\n", vertexNum, edgeNum);
//std::vector<SdwEdge*> edgeList;
SdwEdge *edgeList = (SdwEdge *)malloc(sizeof(SdwEdge) * edgeNum);
if (edgeList == NULL)
{
printf("malloc fail");
exit(1);
}
int edgeIndex = 0;
for (i = 0; i < vertexNum; i++)
{
nbLen = (*peeps)[i].nbrs.size();
for (j = 0; j < nbLen; j++)
{
edgeList[edgeIndex].dst = i;
edgeList[edgeIndex].src = (*peeps)[i].nbrs[j].srcIndex;
edgeList[edgeIndex].weight = (*peeps)[i].nbrs[j].edgeValue.weight;
edgeIndex++;
}
}
//sort
qsort(edgeList, edgeNum, sizeof(SdwEdge), compare);
//check after sorting
//for (i = 0; i < edgeNum; i++)
//{
// printf("src (%d) dst (%d) wieght (%d)\n", edgeList[i].src, edgeList[i].dst, edgeList[i].weight);
//}
unsigned int *DisCur = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
unsigned int *newDisCur = (unsigned int *)malloc(sizeof(unsigned int) * vertexNum);
DisCur[0] = 0;
for (i = 1; i < vertexNum; i++)
{
DisCur[i] = SSSP_INF;
}
DisCur[edgeList[0].src]=0;
//check init dist
//for (i = 0; i < vertexNum; i++)
//{
// printf("index %d dist %d\n", i, finalDist[i]);
//}
//init the parameters on GPU
SdwEdge *gpuElist;
unsigned int *gpuDisCur;
cudaMallocManaged((void **)&gpuElist, sizeof(SdwEdge) * edgeNum);
cudaMemcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum, cudaMemcpyHostToDevice);
//memcpy(gpuElist, edgeList, sizeof(SdwEdge) * edgeNum);
cudaMallocManaged((void **)&gpuDisCur, sizeof(unsigned int) * vertexNum);
//memcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum);
cudaMemcpy(gpuDisCur, DisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyHostToDevice);
//copy in every iteration
//gpu task
int *gpuBase;
int *gpuChange;
cudaMallocManaged((void **)&(gpuBase), sizeof(int));
cudaMallocManaged((void **)&(gpuChange), sizeof(int));
int iteIndex = 1;
int change = 0;
int spanSize = 32;
int warpNum=blockNum*blockSize/spanSize;
int workPerwarp;
int reminder;
int extra;
int listlen=edgeNum;
if (listlen % warpNum == 0)
{
workPerwarp = listlen / warpNum;
}
else
{
reminder=listlen % warpNum;
if(reminder % warpNum==0){
extra=reminder/warpNum;
}else{
extra=(reminder/warpNum)+1;
}
workPerwarp = extra+(listlen / warpNum);
}
struct timespec gpuStart, gpuEnd;
double gpuTotal = 0;
for (iteIndex = 0; iteIndex < edgeNum; iteIndex++)
//for (iteIndex = 0; iteIndex < 1000; iteIndex++)
{
//for (iteIndex=0; iteIndex<2; iteIndex++){
//printf("debug point 0\n");
//gpu task
//memset(distFlag,0,sizeof(int)*vertexNum);
change = 0;
//int *change=(int*)malloc(sizeof(int));
//*change=0;
//printf("iteration index %d\n", iteIndex);
cudaMemcpy(gpuChange, &change, sizeof(int), cudaMemcpyHostToDevice);
//trick gsharemem space array
//1 size for edgeList 1*blockSize
//2 size for distPrev 1*blockSize
//3 size for distCur 1*blockSize
//printf("debug point 1\n");
//int sharSize = blockSize * (sizeof(SdwEdge) + sizeof(unsigned int) * 2);
//int sharSize = blockSize * (sizeof(SdwEdge));
//int sharSize = workPerwarp * (sizeof(SdwEdge));
//TODO shareMem <check if shareMem is larger than limitation>
int sharSize=0;
if (blockNum * blockSize < spanSize)
{
spanSize = blockNum * blockSize;
}
//printf("iteration %d curr base %d\n",i,base);
//pulling_kernel<<<blockNum, blockSize, sharSize>>>(gpuElist, edgeNum, gpuDisCur, base, gpuChange, spanSize,iteIndex);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuStart);
pulling_kernel<<<blockNum, blockSize, sharSize>>>(gpuElist, edgeNum, gpuDisCur, gpuChange, spanSize, blockNum, workPerwarp, warpNum);
clock_gettime(CLOCK_MONOTONIC_RAW, &gpuEnd);
gpuTotal = gpuTotal + (double)1000 * (gpuEnd.tv_sec - gpuStart.tv_sec) + (double)(gpuEnd.tv_nsec - gpuStart.tv_nsec) / 1000000;
//pulling_kernel_testshar<<<blockNum, blockSize, sharSize>>>(gpuElist, edgeNum, gpuDisCur, base, gpuChange, spanSize);
//printf("debug point 3\n");
//get return value, check if change
cudaMemcpy(&change, gpuChange, sizeof(int), cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
//printf("iteration number %d gpuchange %d\n",iteIndex,change);
//for(i=0;i<vertexNum;i++){
// printf("iterIndex %d change %d, final index %d dis %d\n",iteIndex,change,i,DisCur[i]);
//}
//bool ifchange=disChange(DisPre,DisCur,vertexNum);
if (change == 0)
{
printf("iteration no change, ineration num %d\n",iteIndex);
//cudaMemcpy(newDisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyDeviceToHost);
break;
}
}
//check result
//for(i=0;i<vertexNum;i++){
// printf("index %d dis %d\n",i,DisCur[i]);
//}
cudaMemcpy(newDisCur, gpuDisCur, sizeof(unsigned int) * vertexNum, cudaMemcpyDeviceToHost);
//update the vector
for (i = 0; i < vertexNum; i++)
{
//printf("test value %d\n",newDisCur[i]);
(*peeps)[i].vertexValue.distance = newDisCur[i];
}
cudaFree(gpuElist);
cudaFree(gpuDisCur);
printf("The computation kernel time on GPU is %f milli-seconds\n",gpuTotal);
std::cout << "The total computation time is " << getTime() << " milli-seconds.\n";
} |
e746cd0e0dc5fa610f3677ac3cc950848b31f2e8.hip | // !!! This is a file automatically generated by hipify!!!
//
// SkyNet Project
// Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet>
//
// This code is licensed under the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#include <hip/hip_runtime.h>
#include <cudnn.h>
#include "../stdafx.h"
#include "snOperatorCUDA/src/Operator/pooling.h"
using namespace std;
using namespace SN_Base;
struct gpuParams{
cudnnHandle_t cudnn = 0;
cudnnPoolingDescriptor_t pool_desc = 0;
cudnnTensorDescriptor_t in_desc = 0;
cudnnTensorDescriptor_t out_desc = 0;
cudnnTensorDescriptor_t grin_desc = 0;
cudnnTensorDescriptor_t grout_desc = 0;
};
void Pooling::iniParamCUDA(bool isLern, const snSize& insz, const snSize& outsz, const poolParams& prms, void** pGpuPrm){
bool isFirst = false;
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
hipDeviceProp_t cu_deviceProps;
hipGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
cudnnHandle_t cudnn = nullptr;
cuCHECK(cudnnCreate(&cudnn));
gpuPrm->cudnn = cudnn;
isFirst = true;
}
// input
cudnnTensorDescriptor_t in_desc = nullptr;
cuCHECK(cudnnCreateTensorDescriptor(&in_desc));
cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
gpuPrm->in_desc = in_desc;
// pool
cudnnPoolingDescriptor_t pool_desc = nullptr;
cuCHECK(cudnnCreatePoolingDescriptor(&pool_desc));
cudnnPoolingMode_t poolT = cudnnPoolingMode_t::CUDNN_POOLING_MAX;
if (prms.type == poolType::avg)
poolT = cudnnPoolingMode_t::CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
cuCHECK(cudnnSetPooling2dDescriptor(pool_desc, poolT, cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
int(prms.kernel), int(prms.kernel), int(prms.paddingH), int(prms.paddingW), int(prms.stride), int(prms.stride)));
if (!isFirst)
cuCHECK(cudnnDestroyPoolingDescriptor(gpuPrm->pool_desc));
gpuPrm->pool_desc = pool_desc;
// output
int out_n = 0, out_c = 0, out_h = 0, out_w = 0;
cuCHECK(cudnnGetPooling2dForwardOutputDim(pool_desc, in_desc,
&out_n, &out_c, &out_h, &out_w));
if (outsz != snSize(out_w, out_h, out_c, out_n)){
ERROR_MESS("CUDA error: outsz != snSize(out_w, out_h, out_c, out_n)");
return;
}
cudnnTensorDescriptor_t out_desc;
cuCHECK(cudnnCreateTensorDescriptor(&out_desc));
cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
gpuPrm->out_desc = out_desc;
if (isLern){
// grout
cudnnTensorDescriptor_t grout_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grout_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
gpuPrm->grout_desc = grout_desc;
// grin
cudnnTensorDescriptor_t grin_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grin_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
gpuPrm->grin_desc = grin_desc;
}
}
void Pooling::freeParamCUDA(void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(cudnnDestroy(gpuPrm->cudnn));
cuCHECK(cudnnDestroyPoolingDescriptor(gpuPrm->pool_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
if (gpuPrm->grin_desc){ // isLern
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
}
}
__global__ void cuFiltrNegative(snSize outsz, snFloat* out){
out += blockIdx.x * outsz.w * outsz.h + blockIdx.y * outsz.w * outsz.h * outsz.d;
unsigned int i = threadIdx.x;
while (i < (outsz.w * outsz.h)){
if (out[i] < 0)
out[i] = 0.0;
i += blockDim.x;
}
}
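// Clamps negative pooled values to zero, i.e. a ReLU applied in place on the
// pooling output. The launch below uses one block per (channel, batch) pair
// and 128 threads that stride across the w*h plane of that slice.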
void Pooling::forwardCUDA(const poolParams& poolPrms, const snSize& insz, const snFloat* input,
const snSize& outsz, snFloat* output, void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnPoolingForward(gpuPrm->cudnn,
gpuPrm->pool_desc,
&alpha,
gpuPrm->in_desc,
input,
&beta,
gpuPrm->out_desc,
output));
// filtrNegative
dim3 dimBlock(128);
dim3 dimGrid(int(outsz.d), int(outsz.n));
cuFiltrNegative << < dimGrid, dimBlock >> >(outsz, output);
}
void Pooling::backwardCUDA(const poolParams& poolPrms, const snSize& outsz, const snFloat* output, const snFloat* gradIn,
const snSize& insz, const snFloat* input, snFloat* gradOut, void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnPoolingBackward(gpuPrm->cudnn,
gpuPrm->pool_desc,
&alpha,
gpuPrm->out_desc,
output,
gpuPrm->grin_desc,
gradIn,
gpuPrm->in_desc,
input,
&beta,
gpuPrm->grout_desc,
gradOut));
}
| e746cd0e0dc5fa610f3677ac3cc950848b31f2e8.cu | //
// SkyNet Project
// Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet>
//
// This code is licensed under the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#include <cuda_runtime.h>
#include <cudnn.h>
#include "../stdafx.h"
#include "snOperatorCUDA/src/Operator/pooling.h"
using namespace std;
using namespace SN_Base;
struct gpuParams{
cudnnHandle_t cudnn = 0;
cudnnPoolingDescriptor_t pool_desc = 0;
cudnnTensorDescriptor_t in_desc = 0;
cudnnTensorDescriptor_t out_desc = 0;
cudnnTensorDescriptor_t grin_desc = 0;
cudnnTensorDescriptor_t grout_desc = 0;
};
void Pooling::iniParamCUDA(bool isLern, const snSize& insz, const snSize& outsz, const poolParams& prms, void** pGpuPrm){
bool isFirst = false;
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
cudaDeviceProp cu_deviceProps;
cudaGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
cudnnHandle_t cudnn = nullptr;
cuCHECK(cudnnCreate(&cudnn));
gpuPrm->cudnn = cudnn;
isFirst = true;
}
// input
cudnnTensorDescriptor_t in_desc = nullptr;
cuCHECK(cudnnCreateTensorDescriptor(&in_desc));
cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
gpuPrm->in_desc = in_desc;
// pool
cudnnPoolingDescriptor_t pool_desc = nullptr;
cuCHECK(cudnnCreatePoolingDescriptor(&pool_desc));
cudnnPoolingMode_t poolT = cudnnPoolingMode_t::CUDNN_POOLING_MAX;
if (prms.type == poolType::avg)
poolT = cudnnPoolingMode_t::CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
cuCHECK(cudnnSetPooling2dDescriptor(pool_desc, poolT, cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
int(prms.kernel), int(prms.kernel), int(prms.paddingH), int(prms.paddingW), int(prms.stride), int(prms.stride)));
if (!isFirst)
cuCHECK(cudnnDestroyPoolingDescriptor(gpuPrm->pool_desc));
gpuPrm->pool_desc = pool_desc;
// output
int out_n = 0, out_c = 0, out_h = 0, out_w = 0;
cuCHECK(cudnnGetPooling2dForwardOutputDim(pool_desc, in_desc,
&out_n, &out_c, &out_h, &out_w));
if (outsz != snSize(out_w, out_h, out_c, out_n)){
ERROR_MESS("CUDA error: outsz != snSize(out_w, out_h, out_c, out_n)");
return;
}
cudnnTensorDescriptor_t out_desc;
cuCHECK(cudnnCreateTensorDescriptor(&out_desc));
cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
gpuPrm->out_desc = out_desc;
if (isLern){
// grout
cudnnTensorDescriptor_t grout_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grout_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
gpuPrm->grout_desc = grout_desc;
// grin
cudnnTensorDescriptor_t grin_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grin_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
out_n, out_c, out_h, out_w));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
gpuPrm->grin_desc = grin_desc;
}
}
void Pooling::freeParamCUDA(void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(cudnnDestroy(gpuPrm->cudnn));
cuCHECK(cudnnDestroyPoolingDescriptor(gpuPrm->pool_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
if (gpuPrm->grin_desc){ // isLern
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
}
}
__global__ void cuFiltrNegative(snSize outsz, snFloat* out){
out += blockIdx.x * outsz.w * outsz.h + blockIdx.y * outsz.w * outsz.h * outsz.d;
unsigned int i = threadIdx.x;
while (i < (outsz.w * outsz.h)){
if (out[i] < 0)
out[i] = 0.0;
i += blockDim.x;
}
}
void Pooling::forwardCUDA(const poolParams& poolPrms, const snSize& insz, const snFloat* input,
const snSize& outsz, snFloat* output, void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnPoolingForward(gpuPrm->cudnn,
gpuPrm->pool_desc,
&alpha,
gpuPrm->in_desc,
input,
&beta,
gpuPrm->out_desc,
output));
// filtrNegative
dim3 dimBlock(128);
dim3 dimGrid(int(outsz.d), int(outsz.n));
cuFiltrNegative << < dimGrid, dimBlock >> >(outsz, output);
}
void Pooling::backwardCUDA(const poolParams& poolPrms, const snSize& outsz, const snFloat* output, const snFloat* gradIn,
const snSize& insz, const snFloat* input, snFloat* gradOut, void* gpuPrms){
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnPoolingBackward(gpuPrm->cudnn,
gpuPrm->pool_desc,
&alpha,
gpuPrm->out_desc,
output,
gpuPrm->grin_desc,
gradIn,
gpuPrm->in_desc,
input,
&beta,
gpuPrm->grout_desc,
gradOut));
}
|
4a8b8ef8bdd75606632515015fc0ceac640a5fee.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <hip/hip_runtime.h>
#include <cudaDefs.h>
#include <Utils/imageManager.h>
#include "../cudautil.cuh"
#include "imageKernels.cuh"
#define BLOCK_DIM 8
static hipError_t error = hipSuccess;
static hipDeviceProp_t deviceProp = hipDeviceProp_t();
texture<float, 2, hipReadModeElementType> texRef; // declared texture reference must be at file-scope !!!
static hipChannelFormatDesc texChannelDesc;
static unsigned char* dImageData = nullptr;
static unsigned int imageWidth;
static unsigned int imageHeight;
static unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
static unsigned int imagePitch;
static size_t texPitch;
static float* dLinearPitchTextureData = nullptr;
static hipArray* dArrayTextureData = nullptr;
static KernelSetting ks;
static float* dOutputData = nullptr;
static void loadSourceImage(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp); // FREEIMAGE align row data ... You have to use pitch instead of width
CHECK_CUDA_CALL(hipMalloc((void**)&dImageData, imagePitch * imageHeight * imageBPP/8));
CHECK_CUDA_CALL(hipMemcpy(dImageData, FreeImage_GetBits(tmp), imagePitch * imageHeight * imageBPP/8, hipMemcpyHostToDevice));
checkHostMatrix<unsigned char>(FreeImage_GetBits(tmp), imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text");
checkDeviceMatrix<unsigned char>(dImageData, imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text");
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
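// FreeImage aligns each scanline, so imagePitch (bytes per row) - not
// imageWidth - is what indexes the raw bits. Because the pitch is already in
// bytes, the extra *imageBPP/8 factor in the allocation/copy above over-counts
// for images deeper than 8 bpp; an 8-bit grayscale input (which this lab
// appears to use) is unaffected, but it is worth keeping in mind for other inputs.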
static void createTextureFromLinearPitchMemory()
{
// TODO: Allocate dLinearPitchTextureData variable memory
CHECK_CUDA_CALL(hipMallocPitch(&dLinearPitchTextureData, &texPitch, imageWidth * sizeof(float), imageHeight));
dim3 blockDim(8, 8);
dim3 gridDim(getNumberOfParts(imageWidth, blockDim.x), getNumberOfParts(imageHeight, blockDim.y));
switch (imageBPP)
{
//TODO: Here call your kernel to convert image into linearPitch memory
case 8:
colorToFloat<8> << <gridDim, blockDim>> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 16:
colorToFloat<16> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 24:
colorToFloat<24> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 32:
colorToFloat<32> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
}
checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "%6.1f ", "Result of Linear Pitch Text");
//TODO: Define texture channel descriptor (texChannelDesc)
texChannelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); //hipCreateChannelDesc<float>();
//TODO: Define texture (texRef) parameters
texRef.normalized = 0;
texRef.addressMode[0] = hipAddressModeClamp; // horizontal
texRef.addressMode[1] = hipAddressModeClamp; // vertical
texRef.filterMode = hipFilterModePoint;
//TODO: Bind texture
CHECK_CUDA_CALL(hipBindTexture2D(NULL, &texRef, dLinearPitchTextureData, &texChannelDesc, imageWidth, imageHeight, texPitch));
}
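// Assumed shape of the colorToFloat kernel from imageKernels.cuh (a sketch for illustration only - the real
// implementation is not shown in this file): one thread per pixel reads from the byte-pitched source and
// writes a float into a destination whose pitch is given in elements. The single-channel read below is an
// assumption; the real kernel may convert RGB(A) pixels to grayscale for the 24/32-bpp cases.
template<unsigned int bpp>
__global__ void colorToFloatSketch(const unsigned char* src, unsigned int width, unsigned int height,
unsigned int srcPitchBytes, unsigned int dstPitchElems, float* dst)
{
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= width || row >= height) return;
dst[row * dstPitchElems + col] = (float)src[row * srcPitchBytes + col * (bpp / 8u)];
}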
static void createTextureFrom2DArray()
{
//TODO: Define texture channel descriptor (texChannelDesc)
texChannelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); //hipCreateChannelDesc<float>();
//TODO: Define texture (texRef) parameters
texRef.normalized = 0;
texRef.addressMode[0] = hipAddressModeClamp; // horizontal
texRef.addressMode[1] = hipAddressModeClamp; // vertical
texRef.filterMode = hipFilterModePoint;
//Converts custom image data to float and stores result in the float_linear_data
float* dLinearTextureData = nullptr;
CHECK_CUDA_CALL(hipMalloc((void**)&dLinearTextureData, imageWidth * imageHeight * sizeof(float)));
dim3 blockDim(8, 8);
dim3 gridDim(getNumberOfParts(imageWidth, blockDim.x), getNumberOfParts(imageHeight, blockDim.y));
switch(imageBPP)
{
//TODO: Here call your kernel to convert image into linear memory (no pitch!!!)
case 8:
colorToFloat<8> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 16:
colorToFloat<16> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 24:
colorToFloat<24> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 32:
colorToFloat<32> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
}
checkDeviceMatrix<float>(dLinearTextureData, imageWidth, imageHeight, imageWidth, "%6.1f ", "Result of Linear Text");
CHECK_CUDA_CALL(hipMallocArray(&dArrayTextureData, &texChannelDesc, imageWidth, imageHeight));
//TODO: copy data into cuda array (dArrayTextureData)
CHECK_CUDA_CALL(hipMemcpyToArray(dArrayTextureData, 0, 0, dLinearTextureData, imageWidth * imageHeight * sizeof(float), hipMemcpyDeviceToDevice));
checkDeviceArray<float>(dArrayTextureData, imageWidth, imageHeight, imageWidth, "%6.1f", "Texture array");
//TODO: Bind texture
CHECK_CUDA_CALL(hipBindTextureToArray(&texRef, dArrayTextureData, &texChannelDesc));
CHECK_CUDA_CALL(hipFree(dLinearTextureData));
}
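// Note (an aside, not in the original): hipMemcpyToArray is deprecated in recent toolkits; the same copy can
// be expressed with the 2D variant, e.g.
// hipMemcpy2DToArray(dArrayTextureData, 0, 0, dLinearTextureData,
// imageWidth * sizeof(float), imageWidth * sizeof(float), imageHeight, hipMemcpyDeviceToDevice);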
static void releaseMemory()
{
CHECK_CUDA_CALL(hipUnbindTexture(&texRef));
if (dImageData!=0)
CHECK_CUDA_CALL(hipFree(dImageData));
if (dLinearPitchTextureData!=0)
CHECK_CUDA_CALL(hipFree(dLinearPitchTextureData));
if (dArrayTextureData)
CHECK_CUDA_CALL(hipFreeArray(dArrayTextureData));
if (dOutputData)
CHECK_CUDA_CALL(hipFree(dOutputData));
}
static __global__ void texKernel(const unsigned int texWidth, const unsigned int texHeight, float* dst)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Minimal completion of the TODO (a sketch): copy the texel at (col, row) into the output buffer.
// With point filtering and unnormalized coordinates, tex2D(texRef, col, row) fetches element (col, row).
if (col < (int)texWidth && row < (int)texHeight)
dst[row * texWidth + col] = tex2D(texRef, col, row);
}
void cviko5()
{
loadSourceImage("textures/terrain10x10.tif");
CHECK_CUDA_CALL(hipMalloc((void**)&dOutputData, imageWidth * imageHeight * sizeof(float)));
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3((imageWidth + BLOCK_DIM-1)/BLOCK_DIM, (imageHeight + BLOCK_DIM-1)/BLOCK_DIM, 1);
//Test 1 - texture stored in linear pitch memory
/*createTextureFromLinearPitchMemory();
hipLaunchKernelGGL(( texKernel), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, imageWidth, imageHeight, dOutputData);
checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData");*/
//Test 2 - texture stored in 2D array
createTextureFrom2DArray();
hipLaunchKernelGGL(( texKernel), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, imageWidth, imageHeight, dOutputData);
checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData");
releaseMemory();
}
| 4a8b8ef8bdd75606632515015fc0ceac640a5fee.cu | // includes, cuda
#include <cuda_runtime.h>
#include <cudaDefs.h>
#include <Utils/imageManager.h>
#include "../cudautil.cuh"
#include "imageKernels.cuh"
#define BLOCK_DIM 8
static cudaError_t error = cudaSuccess;
static cudaDeviceProp deviceProp = cudaDeviceProp();
texture<float, 2, cudaReadModeElementType> texRef; // declared texture reference must be at file-scope !!!
static cudaChannelFormatDesc texChannelDesc;
static unsigned char* dImageData = nullptr;
static unsigned int imageWidth;
static unsigned int imageHeight;
static unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
static unsigned int imagePitch;
static size_t texPitch;
static float* dLinearPitchTextureData = nullptr;
static cudaArray* dArrayTextureData = nullptr;
static KernelSetting ks;
static float* dOutputData = nullptr;
static void loadSourceImage(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
imageWidth = FreeImage_GetWidth(tmp);
imageHeight = FreeImage_GetHeight(tmp);
imageBPP = FreeImage_GetBPP(tmp);
imagePitch = FreeImage_GetPitch(tmp); // FREEIMAGE align row data ... You have to use pitch instead of width
// imagePitch is already the row size in bytes, so the whole bitmap occupies imagePitch * imageHeight bytes;
// multiplying by imageBPP/8 again would over-allocate and read past the end of the FreeImage buffer for BPP > 8.
CHECK_CUDA_CALL(cudaMalloc((void**)&dImageData, imagePitch * imageHeight));
CHECK_CUDA_CALL(cudaMemcpy(dImageData, FreeImage_GetBits(tmp), imagePitch * imageHeight, cudaMemcpyHostToDevice));
checkHostMatrix<unsigned char>(FreeImage_GetBits(tmp), imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text");
checkDeviceMatrix<unsigned char>(dImageData, imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text");
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
static void createTextureFromLinearPitchMemory()
{
// TODO: Allocate dLinearPitchTextureData variable memory
CHECK_CUDA_CALL(cudaMallocPitch(&dLinearPitchTextureData, &texPitch, imageWidth * sizeof(float), imageHeight));
dim3 blockDim(8, 8);
dim3 gridDim(getNumberOfParts(imageWidth, blockDim.x), getNumberOfParts(imageHeight, blockDim.y));
switch (imageBPP)
{
//TODO: Here call your kernel to convert image into linearPitch memory
case 8:
colorToFloat<8> << <gridDim, blockDim>> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 16:
colorToFloat<16> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 24:
colorToFloat<24> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
case 32:
colorToFloat<32> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData);
break;
}
checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "%6.1f ", "Result of Linear Pitch Text");
//TODO: Define texture channel descriptor (texChannelDesc)
texChannelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); //cudaCreateChannelDesc<float>();
//TODO: Define texture (texRef) parameters
texRef.normalized = 0;
texRef.addressMode[0] = cudaAddressModeClamp; // horizontal
texRef.addressMode[1] = cudaAddressModeClamp; // vertical
texRef.filterMode = cudaFilterModePoint;
//TODO: Bind texture
CHECK_CUDA_CALL(cudaBindTexture2D(NULL, &texRef, dLinearPitchTextureData, &texChannelDesc, imageWidth, imageHeight, texPitch));
}
static void createTextureFrom2DArray()
{
//TODO: Define texture channel descriptor (texChannelDesc)
texChannelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); //cudaCreateChannelDesc<float>();
//TODO: Define texture (texRef) parameters
texRef.normalized = 0;
texRef.addressMode[0] = cudaAddressModeClamp; // horizontal
texRef.addressMode[1] = cudaAddressModeClamp; // vertical
texRef.filterMode = cudaFilterModePoint;
//Converts custom image data to float and stores result in the float_linear_data
float* dLinearTextureData = nullptr;
CHECK_CUDA_CALL(cudaMalloc((void**)&dLinearTextureData, imageWidth * imageHeight * sizeof(float)));
dim3 blockDim(8, 8);
dim3 gridDim(getNumberOfParts(imageWidth, blockDim.x), getNumberOfParts(imageHeight, blockDim.y));
switch(imageBPP)
{
//TODO: Here call your kernel to convert image into linear memory (no pitch!!!)
case 8:
colorToFloat<8> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 16:
colorToFloat<16> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 24:
colorToFloat<24> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
case 32:
colorToFloat<32> << <gridDim, blockDim >> > (dImageData, imageWidth, imageHeight, imagePitch, imageWidth, dLinearTextureData);
break;
}
checkDeviceMatrix<float>(dLinearTextureData, imageWidth, imageHeight, imageWidth, "%6.1f ", "Result of Linear Text");
CHECK_CUDA_CALL(cudaMallocArray(&dArrayTextureData, &texChannelDesc, imageWidth, imageHeight));
//TODO: copy data into cuda array (dArrayTextureData)
CHECK_CUDA_CALL(cudaMemcpyToArray(dArrayTextureData, 0, 0, dLinearTextureData, imageWidth * imageHeight * sizeof(float), cudaMemcpyDeviceToDevice));
checkDeviceArray<float>(dArrayTextureData, imageWidth, imageHeight, imageWidth, "%6.1f", "Texture array");
//TODO: Bind texture
CHECK_CUDA_CALL(cudaBindTextureToArray(&texRef, dArrayTextureData, &texChannelDesc));
CHECK_CUDA_CALL(cudaFree(dLinearTextureData));
}
static void releaseMemory()
{
CHECK_CUDA_CALL(cudaUnbindTexture(&texRef));
if (dImageData!=0)
CHECK_CUDA_CALL(cudaFree(dImageData));
if (dLinearPitchTextureData!=0)
CHECK_CUDA_CALL(cudaFree(dLinearPitchTextureData));
if (dArrayTextureData)
CHECK_CUDA_CALL(cudaFreeArray(dArrayTextureData));
if (dOutputData)
CHECK_CUDA_CALL(cudaFree(dOutputData));
}
static __global__ void texKernel(const unsigned int texWidth, const unsigned int texHeight, float* dst)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Minimal completion of the TODO (a sketch): copy the texel at (col, row) into the output buffer.
// With point filtering and unnormalized coordinates, tex2D(texRef, col, row) fetches element (col, row).
if (col < (int)texWidth && row < (int)texHeight)
dst[row * texWidth + col] = tex2D(texRef, col, row);
}
void cviko5()
{
loadSourceImage("textures/terrain10x10.tif");
CHECK_CUDA_CALL(cudaMalloc((void**)&dOutputData, imageWidth * imageHeight * sizeof(float)));
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3((imageWidth + BLOCK_DIM-1)/BLOCK_DIM, (imageHeight + BLOCK_DIM-1)/BLOCK_DIM, 1);
//Test 1 - texture stored in linear pitch memory
/*createTextureFromLinearPitchMemory();
texKernel<<<ks.dimGrid, ks.dimBlock>>>(imageWidth, imageHeight, dOutputData);
checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData");*/
//Test 2 - texture stored in 2D array
createTextureFrom2DArray();
texKernel<<<ks.dimGrid, ks.dimBlock>>>(imageWidth, imageHeight, dOutputData);
checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData");
releaseMemory();
}
|
17461fa9e36b12164b8bf73ad67e37cc268cd6c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int sum = 1;
__global__ void degreeCalc (int *array){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i>=1000000){
return;
}
atomicAdd(&sum, array[i]); // plain "sum += array[i]" is a data race across threads; atomicAdd keeps the total correct
// if (i==999999){
// printf("%d", sum);
// }
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
stop = vertexArray[i+1];
diff = stop-start;
degreeCount[i]=diff;
} | 17461fa9e36b12164b8bf73ad67e37cc268cd6c7.cu | #include "includes.h"
__device__ int sum = 1;
__global__ void degreeCalc (int *array){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i>=1000000){
return;
}
atomicAdd(&sum, array[i]); // plain "sum += array[i]" is a data race across threads; atomicAdd keeps the total correct
// if (i==999999){
// printf("%d", sum);
// }
}
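// Host-side usage sketch (not part of the original file): launch the single-array overload above and read
// the __device__ accumulator back through the runtime symbol API. The 1,000,000-element size matches the
// hard-coded bound inside the kernel.
void sketchLaunchDegreeCalc(int *dArray)
{
degreeCalc<<<(1000000 + 255) / 256, 256>>>(dArray);
cudaDeviceSynchronize();
int hSum = 0;
cudaMemcpyFromSymbol(&hSum, sum, sizeof(int));
// hSum now holds 1 (the initial value of sum) plus the total of the first 1,000,000 array elements.
}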
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
stop = vertexArray[i+1];
diff = stop-start;
degreeCount[i]=diff;
} |
e983092254c538416725c8ced8a9d12dbc66e209.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
static void conv_forward_valid(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4]);
void testConv_v2(float* x, float* conv1, const int* dim1, const int* dim2) {
int *d_xdims, *d_adims, *d_conv1dims;
float *d_x, *d_conv1;
float* ad; //will store the result of para conv
const int adims[] = {dim1[0], (dim1[1] - dim2[0] + 1), (dim1[2] - dim2[1] + 1), dim2[3]};
auto a = zeros<float>(adims);
auto ah = zeros<float>(adims); //host
hipMalloc(&d_xdims, sizeof(int) * 4);
hipMalloc(&d_adims, sizeof(int) * 4);
hipMalloc(&d_conv1dims, sizeof(int) * 4);
hipMalloc(&d_x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3]);
hipMalloc(&d_conv1, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3]);
hipMalloc(&ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3]);
hipMemcpy(d_x, x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3], hipMemcpyHostToDevice);
hipMemcpy(d_conv1, conv1, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3], hipMemcpyHostToDevice);
hipMemcpy(d_adims, adims, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_xdims, dim1, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_conv1dims, dim2, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemset(ad, 0, sizeof(float)* adims[0] * adims[1] * adims[2] * adims[3]);
conv_forward_valid(x, dim1, conv1, dim2, a, adims);
dim3 convGrid(dim1[0], 1, 1);
dim3 convBlock(128, 1, 1);
hipLaunchKernelGGL(( conv_forward_para_v2) , dim3(convGrid), dim3(convBlock), 0, 0, d_x, d_xdims,
d_conv1, d_conv1dims, ad, d_adims);
hipDeviceSynchronize();
hipMemcpy(ah, ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3], hipMemcpyDeviceToHost);
float count = 0.0;
for (const auto j : range(0, adims[0]*adims[1]*adims[2]*adims[3])) {
if (fabsf((ah[j] - a[j])/a[j]) > 1e-2) { // compare the absolute relative error so negative deviations are caught too
printf("I get %d: %f", j, ah[j]);
printf(" Should be : %f\n", a[j]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(adims[0]*adims[1]*adims[2]*adims[3])); // count holds the number of mismatching elements
}
// This function would print a lot of conv result for testing.
void testConv(float* x, float* conv1, const int* dim1, const int* dim2) {
int *d_xdims, *d_adims, *d_conv1dims;
hipMalloc(&d_xdims, sizeof(int) * 4);
hipMalloc(&d_adims, sizeof(int) * 4);
hipMalloc(&d_conv1dims, sizeof(int) * 4);
float *d_x, *d_conv1;
hipMalloc(&d_x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3]);
hipMalloc(&d_conv1, sizeof(float) * dim2[0] * dim2[1]
* dim2[2] * dim2[3]);
hipMemcpy(d_x, x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3],
hipMemcpyHostToDevice);
hipMemcpy(d_conv1, conv1, sizeof(float) * dim2[0] * dim2[1]
* dim2[2] * dim2[3], hipMemcpyHostToDevice);
const int adims[] = {dim1[0], (dim1[1] - dim2[0] + 1),
(dim1[2] - dim2[1] + 1), dim2[3]};
hipMemcpy(d_adims, adims, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_xdims, dim1, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_conv1dims, dim2, sizeof(int) * 4, hipMemcpyHostToDevice);
auto a = zeros<float>(adims);
conv_forward_valid(x, dim1, conv1, dim2, a, adims);
float* ad; //will store the result of first conv
hipMalloc(&ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3]);
hipMemset(ad, 0, sizeof(float)* adims[0] * adims[1] * adims[2] * adims[3]);
auto ah = zeros<float>(adims); //host
dim3 convGrid(adims[0], 1, 1);
dim3 convBlock(128, 1, 1);
hipLaunchKernelGGL(( conv_forward_para1) , dim3(convGrid), dim3(convBlock), 0, 0, d_x, d_xdims,
d_conv1, d_conv1dims, ad, d_adims);
hipDeviceSynchronize();
hipMemcpy(ah, ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3],
hipMemcpyDeviceToHost);
float count = 0.0;
for (const auto j : range(0, adims[0]*adims[1]*adims[2]*adims[3])) {
if (fabsf((ah[j] - a[j])/a[j]) > 1e-2) { // compare the absolute relative error so negative deviations are caught too
printf("I get %d: %f", j, ah[j]);
printf(" Should be : %f\n", a[j]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(adims[0]*adims[1]*adims[2]*adims[3])); // count holds the number of mismatching elements
}
void testPool(float* x, const int* dim) {
int *d_dim;
float *d_x, *d_y;
hipMalloc(&d_dim, sizeof(int)*4);
hipMalloc(&d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3]);
hipMalloc(&d_y, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3] / 4);
hipMemcpy(d_dim, dim, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_x, x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
hipMemcpyHostToDevice);
dim3 poolBlock(128, 1, 1);
float batch_size = 10.0; //number of input images each block should handle
dim3 poolGrid(ceil(dim[2] * dim[3] / 256.0), ceil(dim[0] / batch_size), 1);
hipLaunchKernelGGL(( average_pool_para) , dim3(poolGrid), dim3(poolBlock), 0, 0, d_x, d_dim, d_y);
hipDeviceSynchronize();
const int dim2[] = {dim[0], dim[1] / 2, dim[2] / 2,
dim[3]};
auto y = zeros<float>(dim2);
auto bh = zeros<float>(dim2);
hipMemcpy(y, d_y, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3],
hipMemcpyDeviceToHost);
// average_pool(x, dim, 2, bh, dim2);
float count = 0.0;
for (const auto i : range(0, dim2[0] * dim2[1] * dim2[2] * dim2[3])) {
if (fabsf(bh[i] - y[i]) > 1e-5) { // absolute difference, so deviations in either direction count as mismatches
//printf("I get %d %d %d, %d: %f", i/(dim2[1] * dim2[2] * dim2[3]),
// (i/(dim2[3] * dim2[2]))%dim2[1],
// i/(dim2[3])%(dim2[1]), y[i]);
//printf(" Should be : %f\n", bh[i]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(dim2[0]*dim2[1]*dim2[2]*dim2[3])); // count holds the number of mismatching elements
}
void testRelu(float* x, const int dim[4]) {
int *d_dim;
float *d_x;
hipMalloc(&d_dim, sizeof(int)*4);
hipMalloc(&d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3]);
hipMemcpy(d_dim, dim, sizeof(int) * 4, hipMemcpyHostToDevice);
hipMemcpy(d_x, x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
hipMemcpyHostToDevice);
int grid = ceil(dim[0] * dim[1] * dim[2] * dim[3] / 128.0);
hipLaunchKernelGGL(( relu_para) , dim3(grid), dim3(128), 0, 0, d_x, dim[0] * dim[1] * dim[2] * dim[3]);
const int dim2[] = {dim[0], dim[1], dim[2],
dim[3]};
auto out = zeros<float>(dim2);
auto bh = zeros<float>(dim2);
hipMemcpy(out, d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
hipMemcpyDeviceToHost);
// relu4(x, dim);
float count = 0.0;
for (const auto i : range(0, dim[0] * dim[1] * dim[2] * dim[3])) {
if (fabsf(x[i] - out[i]) > 1e-5) { // absolute difference, so deviations in either direction count as mismatches
//printf("I get %d %d %d, %d: %f", i/(dim2[1] * dim2[2] * dim2[3]),
// (i/(dim2[3] * dim2[2]))%dim2[1],
// i/(dim2[3])%(dim2[1]), y[i]);
//printf(" Should be : %f\n", bh[i]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(dim[0]*dim[1]*dim[2]*dim[3])); // count holds the number of mismatching elements
}
| e983092254c538416725c8ced8a9d12dbc66e209.cu |
static void conv_forward_valid(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4]);
void testConv_v2(float* x, float* conv1, const int* dim1, const int* dim2) {
int *d_xdims, *d_adims, *d_conv1dims;
float *d_x, *d_conv1;
float* ad; //will store the result of para conv
const int adims[] = {dim1[0], (dim1[1] - dim2[0] + 1), (dim1[2] - dim2[1] + 1), dim2[3]};
auto a = zeros<float>(adims);
auto ah = zeros<float>(adims); //host
cudaMalloc(&d_xdims, sizeof(int) * 4);
cudaMalloc(&d_adims, sizeof(int) * 4);
cudaMalloc(&d_conv1dims, sizeof(int) * 4);
cudaMalloc(&d_x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3]);
cudaMalloc(&d_conv1, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3]);
cudaMalloc(&ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3]);
cudaMemcpy(d_x, x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3], cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1, conv1, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3], cudaMemcpyHostToDevice);
cudaMemcpy(d_adims, adims, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_xdims, dim1, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1dims, dim2, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemset(ad, 0, sizeof(float)* adims[0] * adims[1] * adims[2] * adims[3]);
conv_forward_valid(x, dim1, conv1, dim2, a, adims);
dim3 convGrid(dim1[0], 1, 1);
dim3 convBlock(128, 1, 1);
conv_forward_para_v2 <<<convGrid, convBlock>>> (d_x, d_xdims,
d_conv1, d_conv1dims, ad, d_adims);
cudaDeviceSynchronize();
cudaMemcpy(ah, ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3], cudaMemcpyDeviceToHost);
float count = 0.0;
for (const auto j : range(0, adims[0]*adims[1]*adims[2]*adims[3])) {
if (fabsf((ah[j] - a[j])/a[j]) > 1e-2) { // compare the absolute relative error so negative deviations are caught too
printf("I get %d: %f", j, ah[j]);
printf(" Should be : %f\n", a[j]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(adims[0]*adims[1]*adims[2]*adims[3])); // count holds the number of mismatching elements
}
// This function would print a lot of conv result for testing.
void testConv(float* x, float* conv1, const int* dim1, const int* dim2) {
int *d_xdims, *d_adims, *d_conv1dims;
cudaMalloc(&d_xdims, sizeof(int) * 4);
cudaMalloc(&d_adims, sizeof(int) * 4);
cudaMalloc(&d_conv1dims, sizeof(int) * 4);
float *d_x, *d_conv1;
cudaMalloc(&d_x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3]);
cudaMalloc(&d_conv1, sizeof(float) * dim2[0] * dim2[1]
* dim2[2] * dim2[3]);
cudaMemcpy(d_x, x, sizeof(float) * dim1[0] * dim1[1] * dim1[2] * dim1[3],
cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1, conv1, sizeof(float) * dim2[0] * dim2[1]
* dim2[2] * dim2[3], cudaMemcpyHostToDevice);
const int adims[] = {dim1[0], (dim1[1] - dim2[0] + 1),
(dim1[2] - dim2[1] + 1), dim2[3]};
cudaMemcpy(d_adims, adims, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_xdims, dim1, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1dims, dim2, sizeof(int) * 4, cudaMemcpyHostToDevice);
auto a = zeros<float>(adims);
conv_forward_valid(x, dim1, conv1, dim2, a, adims);
float* ad; //will store the result of first conv
cudaMalloc(&ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3]);
cudaMemset(ad, 0, sizeof(float)* adims[0] * adims[1] * adims[2] * adims[3]);
auto ah = zeros<float>(adims); //host
dim3 convGrid(adims[0], 1, 1);
dim3 convBlock(128, 1, 1);
conv_forward_para1 <<<convGrid, convBlock>>> (d_x, d_xdims,
d_conv1, d_conv1dims, ad, d_adims);
cudaDeviceSynchronize();
cudaMemcpy(ah, ad, sizeof(float) * adims[0] * adims[1] * adims[2] * adims[3],
cudaMemcpyDeviceToHost);
float count = 0.0;
for (const auto j : range(0, adims[0]*adims[1]*adims[2]*adims[3])) {
if (fabsf((ah[j] - a[j])/a[j]) > 1e-2) { // compare the absolute relative error so negative deviations are caught too
printf("I get %d: %f", j, ah[j]);
printf(" Should be : %f\n", a[j]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(adims[0]*adims[1]*adims[2]*adims[3])); // count holds the number of mismatching elements
}
void testPool(float* x, const int* dim) {
int *d_dim;
float *d_x, *d_y;
cudaMalloc(&d_dim, sizeof(int)*4);
cudaMalloc(&d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3]);
cudaMalloc(&d_y, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3] / 4);
cudaMemcpy(d_dim, dim, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
cudaMemcpyHostToDevice);
dim3 poolBlock(128, 1, 1);
float batch_size = 10.0; //number of input images each block should handle
dim3 poolGrid(ceil(dim[2] * dim[3] / 256.0), ceil(dim[0] / batch_size), 1);
average_pool_para <<<poolGrid, poolBlock>>> (d_x, d_dim, d_y);
cudaDeviceSynchronize();
const int dim2[] = {dim[0], dim[1] / 2, dim[2] / 2,
dim[3]};
auto y = zeros<float>(dim2);
auto bh = zeros<float>(dim2);
cudaMemcpy(y, d_y, sizeof(float) * dim2[0] * dim2[1] * dim2[2] * dim2[3],
cudaMemcpyDeviceToHost);
// average_pool(x, dim, 2, bh, dim2);
float count = 0.0;
for (const auto i : range(0, dim2[0] * dim2[1] * dim2[2] * dim2[3])) {
if (fabsf(bh[i] - y[i]) > 1e-5) { // absolute difference, so deviations in either direction count as mismatches
//printf("I get %d %d %d, %d: %f", i/(dim2[1] * dim2[2] * dim2[3]),
// (i/(dim2[3] * dim2[2]))%dim2[1],
// i/(dim2[3])%(dim2[1]), y[i]);
//printf(" Should be : %f\n", bh[i]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(dim2[0]*dim2[1]*dim2[2]*dim2[3])); // count holds the number of mismatching elements
}
void testRelu(float* x, const int dim[4]) {
int *d_dim;
float *d_x;
cudaMalloc(&d_dim, sizeof(int)*4);
cudaMalloc(&d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3]);
cudaMemcpy(d_dim, dim, sizeof(int) * 4, cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
cudaMemcpyHostToDevice);
int grid = ceil(dim[0] * dim[1] * dim[2] * dim[3] / 128.0);
relu_para <<<grid, 128>>> (d_x, dim[0] * dim[1] * dim[2] * dim[3]);
const int dim2[] = {dim[0], dim[1], dim[2],
dim[3]};
auto out = zeros<float>(dim2);
auto bh = zeros<float>(dim2);
cudaMemcpy(out, d_x, sizeof(float) * dim[0] * dim[1] * dim[2] * dim[3],
cudaMemcpyDeviceToHost);
// relu4(x, dim);
float count = 0.0;
for (const auto i : range(0, dim[0] * dim[1] * dim[2] * dim[3])) {
if (fabsf(x[i] - out[i]) > 1e-5) { // absolute difference, so deviations in either direction count as mismatches
//printf("I get %d %d %d, %d: %f", i/(dim2[1] * dim2[2] * dim2[3]),
// (i/(dim2[3] * dim2[2]))%dim2[1],
// i/(dim2[3])%(dim2[1]), y[i]);
//printf(" Should be : %f\n", bh[i]);
count++;
}
}
printf("Mismatch rate is %f\n", count/(dim[0]*dim[1]*dim[2]*dim[3])); // count holds the number of mismatching elements
}
|
56c79608147ff010b316ee3e8d5b51848179eb72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void dnorm_kernel(float *vals, int N, float mu, float sigma)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N) {
float std = (vals[idx] - mu)/sigma;
float e = exp( - 0.5 * std * std);
vals[idx] = e / ( sigma * sqrt(2 * 3.141592653589793));
}
}
| 56c79608147ff010b316ee3e8d5b51848179eb72.cu | extern "C"
__global__ void dnorm_kernel(float *vals, int N, float mu, float sigma)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N) {
float std = (vals[idx] - mu)/sigma;
float e = exp( - 0.5 * std * std);
vals[idx] = e / ( sigma * sqrt(2 * 3.141592653589793));
}
}
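// Worked check of the formula above (not in the original): for mu = 0, sigma = 1 and x = 0 the kernel
// computes exp(-0.5 * 0) / (1 * sqrt(2 * pi)) = 1 / sqrt(2 * pi) ~= 0.39894, the peak of the standard
// normal pdf; each vals[idx] is overwritten with the density evaluated at its original value.
// Minimal host-side launch sketch (an assumption for illustration, not part of the original file):
extern "C" void dnorm_on_device(float *dVals, int N)
{
dim3 block(256);
dim3 grid((N + block.x - 1) / block.x);
dnorm_kernel<<<grid, block>>>(dVals, N, 0.0f, 1.0f);
cudaDeviceSynchronize();
}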
|
2c60a263a65f92149794e33aa88b5920772b9c70.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
// Num. of sample
#define N_samp 8
#define N_col 64
// Num. of Channel
#define N_ch (TK_NUM * BT_NUM)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index);
int main(){
float **r;
float **r_dev;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the F
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the
float **Vect_F_dev;
int num_thread[N_ch], *num_thread_dev;
int num_size[BT_NUM];
int pos_task[BT_NUM][TK_NUM];
int **pos_task_dev;
float **h_Vect_F;
hipSetDevice(0);
FILE *f;
int i, j;
double start_timer, end_timer;
f = fopen("rand.txt", "r");
for(i = 0; i < N_ch; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < BT_NUM; i++){
num_size[i] = 0;
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j]*16)*
(num_thread[i*TK_NUM+j]*16);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1]*16)*
(num_thread[i*TK_NUM+j-1]*16);
}
}
for(i = 0; i < N_ch; i++)
num_thread[i] *= 32;
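// Worked example of the bookkeeping above (illustrative, not in the original): if the digit read from
// rand.txt for a task is 4, that task contributes (4*16)*(4*16) = 4096 floats to its block's num_size,
// pos_task records the running prefix sum of those sizes as the task's offset into the per-block buffers,
// and the final "*= 32" turns the same digit into the task's worker-thread count (4 -> 128).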
r = (float**)malloc(BT_NUM*sizeof(float*));
H = (float**)malloc(BT_NUM*sizeof(float*));
F = (float**)malloc(BT_NUM*sizeof(float*));
Vect_H = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Dn = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Up = (float**)malloc(BT_NUM*sizeof(float*));
Vect_F = (float**)malloc(BT_NUM*sizeof(float*));
r_dev = (float**)malloc(BT_NUM*sizeof(float*));
H_dev = (float**)malloc(BT_NUM*sizeof(float*));
F_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_H_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Dn_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Up_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_F_dev = (float**)malloc(BT_NUM*sizeof(float*));
pos_task_dev = (int**)malloc(BT_NUM*sizeof(int*));
h_Vect_F = (float**)malloc(BT_NUM*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostMalloc(&r[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&r_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&H[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&F[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&F_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_H[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Dn[i], (num_size[i]/N_samp)*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Dn_dev[i], (num_size[i]/N_samp)*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Up[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Up_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_F[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_F_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
h_Vect_F[i] = (float*)malloc(num_size[i] * sizeof(float));
}
checkCudaErrors(hipMalloc(&num_thread_dev, N_ch*sizeof(int)));
printf("Filterbank CUDA static fusion inputs are generating\n");
/*init data*/
for(i = 0; i < BT_NUM; i++)
for(j = 0; j < num_size[i]; j++){
r[i][j] = j + 0.0001;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < BT_NUM; i++)
for(j = 0; j < num_size[i]; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(r_dev[i], r[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(Vect_Up_dev[i], Vect_Up[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(Vect_F_dev[i], Vect_F[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(Vect_H_dev[i], Vect_H[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(H_dev[i], H[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(F_dev[i], F[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(num_thread_dev, num_thread, N_ch*sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
printf("Filterbank CUDA static fusion is running\n");
// task launch
start_timer = my_timer();
for(i = 0; i < BT_NUM; i++){
hipLaunchKernelGGL(( FBCore), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, r_dev[i], H_dev[i], Vect_H_dev[i],
Vect_Dn_dev[i], Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
printf("Filterbank CUDA static fusion Elapsed Time: %f Sec.\n", end_timer - start_timer);
start_timer = my_timer();
// Data transfer back to host
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(Vect_F[i], Vect_F_dev[i], num_size[i]*sizeof(float), hipMemcpyDeviceToHost));
}
checkCudaErrors(hipDeviceSynchronize());
#if 0
// CPU task launch
printf("CPU program running\n");
start_timer = my_timer();
for(i = 0; i < BT_NUM; i++){
h_FBCore(r[i], H[i], Vect_H[i],
Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], pos_task[i], num_thread, i);
}
end_timer = my_timer();
//printf("The CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Verify*/
printf("Verify\n");
int flag = 0;
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_Vect_F[i][j] - Vect_F[i][j]) > 1e-3){
printf("Error:%f, %f, %d\n", h_Vect_F[i][j], Vect_F[i][j], i);
flag = 1;
break;
}
}
}
if(!flag) printf("Verify successfully\n");
#endif
/*Free Memory*/
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostFree(r[i]));
checkCudaErrors(hipFree(r_dev[i]));
checkCudaErrors(hipHostFree(H[i]));
checkCudaErrors(hipFree(H_dev[i]));
checkCudaErrors(hipHostFree(F[i]));
checkCudaErrors(hipFree(F_dev[i]));
checkCudaErrors(hipHostFree(Vect_H[i]));
checkCudaErrors(hipFree(Vect_H_dev[i]));
checkCudaErrors(hipHostFree(Vect_Dn[i]));
checkCudaErrors(hipFree(Vect_Dn_dev[i]));
checkCudaErrors(hipHostFree(Vect_Up[i]));
checkCudaErrors(hipFree(Vect_Up_dev[i]));
checkCudaErrors(hipHostFree(Vect_F[i]));
checkCudaErrors(hipFree(pos_task_dev[i]));
}
checkCudaErrors(hipFree(num_thread_dev));
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
free(pos_task_dev);
return 0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index){
int tid = threadIdx.x;
int td;
int j, k;
td = threads[index*TK_NUM+blockIdx.x];
//convolving H
if(tid < td){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_H[j*td+tid+size[blockIdx.x]] +=
(r[(j*td+tid)-k+size[blockIdx.x]]*H[k+size[blockIdx.x]]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < td)
for (j=0; j < (td*td/4)/N_samp/td; j++){
Vect_Dn[(j*td+tid)+size[blockIdx.x]]
=Vect_H[(j*td+tid)*N_samp+size[blockIdx.x]];
}
//Up Sampling
if(tid < td)
for (j=0; j < (td*td/4)/N_samp/td;j++){
Vect_Up[(j*td+tid)*N_samp+size[blockIdx.x]]
=Vect_Dn[(j*td+tid)+size[blockIdx.x]];
}
__syncthreads();
//convolving F
if(tid < td){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_F[j*td+tid+size[blockIdx.x]] +=
(F[k]*Vect_H[(j*td+tid)-k+size[blockIdx.x]]);
}
}
}
}
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index){
int td, tid;
int i, j, k;
for(i = 0; i < TK_NUM; i++){
td = threads[index*TK_NUM+i];
//convolving H
for(tid = 0; tid < td; tid ++){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_H[j*td+tid+size[i]] +=
(r[(j*td+tid)-k+size[i]]*H[k+size[i]]);
}
}
}
}
//Down Sampling
for(tid = 0; tid < td; tid ++)
for (j=0; j < (td*td/4)/N_samp/td; j++){
Vect_Dn[(j*td+tid)+size[i]]
=Vect_H[(j*td+tid)*N_samp+size[i]];
}
//Up Sampling
for(tid = 0; tid < td; tid ++)
for (j=0; j < (td*td/4)/N_samp/td;j++){
Vect_Up[(j*td+tid)*N_samp+size[i]]
=Vect_Dn[(j*td+tid)+size[i]];
}
//convolving F
for(tid = 0; tid < td; tid ++){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_F[j*td+tid+size[i]]+=
(F[k]*Vect_H[(j*td+tid)-k+size[i]]);
}
}
}
}
}
}
| 2c60a263a65f92149794e33aa88b5920772b9c70.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
// Num. of sample
#define N_samp 8
#define N_col 64
// Num. of Channel
#define N_ch (TK_NUM * BT_NUM)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index);
int main(){
float **r;
float **r_dev;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the F
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the
float **Vect_F_dev;
int num_thread[N_ch], *num_thread_dev;
int num_size[BT_NUM];
int pos_task[BT_NUM][TK_NUM];
int **pos_task_dev;
float **h_Vect_F;
cudaSetDevice(0);
FILE *f;
int i, j;
double start_timer, end_timer;
f = fopen("rand.txt", "r");
for(i = 0; i < N_ch; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < BT_NUM; i++){
num_size[i] = 0;
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j]*16)*
(num_thread[i*TK_NUM+j]*16);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1]*16)*
(num_thread[i*TK_NUM+j-1]*16);
}
}
for(i = 0; i < N_ch; i++)
num_thread[i] *= 32;
r = (float**)malloc(BT_NUM*sizeof(float*));
H = (float**)malloc(BT_NUM*sizeof(float*));
F = (float**)malloc(BT_NUM*sizeof(float*));
Vect_H = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Dn = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Up = (float**)malloc(BT_NUM*sizeof(float*));
Vect_F = (float**)malloc(BT_NUM*sizeof(float*));
r_dev = (float**)malloc(BT_NUM*sizeof(float*));
H_dev = (float**)malloc(BT_NUM*sizeof(float*));
F_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_H_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Dn_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_Up_dev = (float**)malloc(BT_NUM*sizeof(float*));
Vect_F_dev = (float**)malloc(BT_NUM*sizeof(float*));
pos_task_dev = (int**)malloc(BT_NUM*sizeof(int*));
h_Vect_F = (float**)malloc(BT_NUM*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&r[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&r_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&H[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&F[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&F_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_H[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Dn[i], (num_size[i]/N_samp)*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Dn_dev[i], (num_size[i]/N_samp)*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Up[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Up_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_F[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_F_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
h_Vect_F[i] = (float*)malloc(num_size[i] * sizeof(float));
}
checkCudaErrors(cudaMalloc(&num_thread_dev, N_ch*sizeof(int)));
printf("Filterbank CUDA static fusion inputs are generating\n");
/*init data*/
for(i = 0; i < BT_NUM; i++)
for(j = 0; j < num_size[i]; j++){
r[i][j] = j + 0.0001;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < BT_NUM; i++)
for(j = 0; j < num_size[i]; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(r_dev[i], r[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(Vect_Up_dev[i], Vect_Up[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(Vect_F_dev[i], Vect_F[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(Vect_H_dev[i], Vect_H[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(H_dev[i], H[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(F_dev[i], F[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(num_thread_dev, num_thread, N_ch*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
printf("Filterbank CUDA static fusion is running\n");
// task launch
start_timer = my_timer();
for(i = 0; i < BT_NUM; i++){
FBCore<<<TK_NUM, TDK_NUM>>>(r_dev[i], H_dev[i], Vect_H_dev[i],
Vect_Dn_dev[i], Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], pos_task_dev[i], num_thread_dev, i);
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
printf("Filterbank CUDA static fusion Elapsed Time: %f Sec.\n", end_timer - start_timer);
start_timer = my_timer();
// Data transfer back to host
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(Vect_F[i], Vect_F_dev[i], num_size[i]*sizeof(float), cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaDeviceSynchronize());
#if 0
// CPU task launch
printf("CPU program running\n");
start_timer = my_timer();
for(i = 0; i < BT_NUM; i++){
h_FBCore(r[i], H[i], Vect_H[i],
Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], pos_task[i], num_thread, i);
}
end_timer = my_timer();
//printf("The CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Verify*/
printf("Verify\n");
int flag = 0;
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_Vect_F[i][j] - Vect_F[i][j]) > 1e-3){
printf("Error:%f, %f, %d\n", h_Vect_F[i][j], Vect_F[i][j], i);
flag = 1;
break;
}
}
}
if(!flag) printf("Verify successfully\n");
#endif
/*Free Memory*/
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaFreeHost(r[i]));
checkCudaErrors(cudaFree(r_dev[i]));
checkCudaErrors(cudaFreeHost(H[i]));
checkCudaErrors(cudaFree(H_dev[i]));
checkCudaErrors(cudaFreeHost(F[i]));
checkCudaErrors(cudaFree(F_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_H[i]));
checkCudaErrors(cudaFree(Vect_H_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Dn[i]));
checkCudaErrors(cudaFree(Vect_Dn_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Up[i]));
checkCudaErrors(cudaFree(Vect_Up_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_F[i]));
checkCudaErrors(cudaFree(pos_task_dev[i]));
}
checkCudaErrors(cudaFree(num_thread_dev));
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
free(pos_task_dev);
return 0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index){
int tid = threadIdx.x;
int td;
int j, k;
td = threads[index*TK_NUM+blockIdx.x];
//convolving H
if(tid < td){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_H[j*td+tid+size[blockIdx.x]] +=
(r[(j*td+tid)-k+size[blockIdx.x]]*H[k+size[blockIdx.x]]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < td)
for (j=0; j < (td*td/4)/N_samp/td; j++){
Vect_Dn[(j*td+tid)+size[blockIdx.x]]
=Vect_H[(j*td+tid)*N_samp+size[blockIdx.x]];
}
//Up Sampling
if(tid < td)
for (j=0; j < (td*td/4)/N_samp/td;j++){
Vect_Up[(j*td+tid)*N_samp+size[blockIdx.x]]
=Vect_Dn[(j*td+tid)+size[blockIdx.x]];
}
__syncthreads();
//convolving F
if(tid < td){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_F[j*td+tid+size[blockIdx.x]] +=
(F[k]*Vect_H[(j*td+tid)-k+size[blockIdx.x]]);
}
}
}
}
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int *size,
int *threads, int index){
int td, tid;
int i, j, k;
for(i = 0; i < TK_NUM; i++){
td = threads[index*TK_NUM+i];
//convolving H
for(tid = 0; tid < td; tid ++){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_H[j*td+tid+size[i]] +=
(r[(j*td+tid)-k+size[i]]*H[k+size[i]]);
}
}
}
}
//Down Sampling
for(tid = 0; tid < td; tid ++)
for (j=0; j < (td*td/4)/N_samp/td; j++){
Vect_Dn[(j*td+tid)+size[i]]
=Vect_H[(j*td+tid)*N_samp+size[i]];
}
//Up Sampling
for(tid = 0; tid < td; tid ++)
for (j=0; j < (td*td/4)/N_samp/td;j++){
Vect_Up[(j*td+tid)*N_samp+size[i]]
=Vect_Dn[(j*td+tid)+size[i]];
}
//convolving F
for(tid = 0; tid < td; tid ++){
for (j=0; j< ((td*td/4)/td); j++){
for(k = 0; k < N_col; k++){
if(((j*td+tid)-k)>=0){
Vect_F[j*td+tid+size[i]]+=
(F[k]*Vect_H[(j*td+tid)-k+size[i]]);
}
}
}
}
}
}
|
000467c70f0ee54c6726165541c93cd50992336d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
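// Worked example of the bin arithmetic above (not part of the original source): with spatial_scale = 1/16
// and an ROI [x1, y1, x2, y2] = [16, 16, 64, 64], the region maps to feature-map cells 1..4 in each
// direction, so roi_width = roi_height = 4; for a 2x2 pooled output bin_size = 4/2 = 2 and bin (ph, pw)
// max-pools the 2x2 patch starting at row roi_start_h + 2*ph, column roi_start_w + 2*pw, clipped to the map.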
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
MSHADOW_CUDA_POST_KERNEL_CHECK(ROIPoolForwardKernel);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
MSHADOW_CUDA_POST_KERNEL_CHECK(ROIPoolBackwardAccKernel);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 000467c70f0ee54c6726165541c93cd50992336d.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
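// Shape reference (editor's note, inferred from the kernel above rather than
// stated in the original source):
//   data        : (batch, channels, height, width)
//   bbox        : (num_rois, 5) rows of [batch_index, x1, y1, x2, y2],
//                 scaled by spatial_scale inside the kernel
//   out/max_idx : (num_rois, channels, pooled_height, pooled_width)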
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
ROIPoolForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
MSHADOW_CUDA_POST_KERNEL_CHECK(ROIPoolForwardKernel);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
ROIPoolBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
MSHADOW_CUDA_POST_KERNEL_CHECK(ROIPoolBackwardAccKernel);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
28325cdec497ff0b4809890d3f3d597a4bf9a480.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cstdlib>
#include <limits>
#include "cycletimer.h"
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#define STB_IMAGE_IMPLEMENTATION
#include "../../utils/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../../utils/stb_image_write.h"
#define BLOCK_SIDE 1024
#define CHANNEL_NUM 3
#include <unistd.h>
#include <stdio.h>
#include <cstdlib>
typedef unsigned int uint;
struct Point;
struct Point {
unsigned int x, y; // coordinates
unsigned int r, g, b;
int count; // no default cluster
__device__ __host__ Point() :
x(0),
y(0),
r(0),
g(0),
b(0),
count(1) {}
__device__ __host__ Point(int count) :
x(0),
y(0),
r(0),
g(0),
b(0),
count(count) {}
__device__ __host__ Point(unsigned int x, unsigned int y, uint r, uint g, uint b) :
x(x),
y(y),
r(r),
g(g),
b(b),
count(1) {}
__device__ __host__ Point(unsigned int x, unsigned int y, uint r, uint g, uint b, int c) :
x(x),
y(y),
r(r),
g(g),
b(b),
count(c) {}
__device__ double color_distance(Point p){
double v = ((double)((p.r - r) * (p.r - r))) +
((double)((p.g - g) * (p.g - g))) +
((double)((p.b - b) * (p.b - b)));
return v;
}
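// Editor's note, illustrative check: pure red (255,0,0) vs pure blue (0,0,255)
// gives 255^2 + 0 + 255^2 = 130050, the squared Euclidean distance in RGB.
// The unsigned subtraction may wrap, but squaring modulo 2^32 still yields the
// correct squared difference.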
__inline__ __device__ __host__ void sum(Point p){
x+=p.x;
y+=p.y;
r+=p.r;
b+=p.b;
g+=p.g;
count+=p.count;
}
__inline__ __device__ __host__ void diff(Point p){
x-=p.x;
y-=p.y;
r-=p.r;
b-=p.b;
g-=p.g;
count-=p.count;
}
__device__ __host__ void print_point(){
printf("X: %d, Y: %d\nR: %d, G: %d, B: %d\nCount: %d\n",
x,y,r,g,b,count);
}
};
__global__ void fill_points(Point* points, int height, int width, uint8_t* rgb_image){
int point = blockIdx.x * blockDim.x + threadIdx.x;
int total_num_points = width * height;
unsigned int x, y;
uint8_t r, g, b;
int point_channel = point*CHANNEL_NUM;
if(point<total_num_points){
int factor = (width*CHANNEL_NUM);
y = (unsigned int)(point_channel/factor);
x = (unsigned int)(point_channel%factor);
r = rgb_image[point_channel];
g = rgb_image[point_channel+1];
b = rgb_image[point_channel+2];
points[point] = (Point(x, y, r, g, b));
}
}
__global__ void set_new_img(Point* means, size_t* assignments, int total_num_points, uint8_t* new_img){
int point = blockIdx.x * blockDim.x + threadIdx.x;
Point p;
int c;
if(point<total_num_points)
{
c = assignments[point];
p = means[c]; //use means by cluster
new_img[CHANNEL_NUM*point] = p.r;
new_img[CHANNEL_NUM*point+1] = p.g;
new_img[CHANNEL_NUM*point+2] = p.b;
}
}
__global__ void set_assignments(Point* data, size_t* assignments, Point* means, int k, int total_num_points){
int point = blockIdx.x * blockDim.x + threadIdx.x;
if(point < total_num_points){
int assignment = 0;
Point p,m;
double best_distance = CHANNEL_NUM*256*256;
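// Editor's note: CHANNEL_NUM*256*256 = 196608 is a safe upper bound, since the
// largest possible squared RGB distance is 3 * 255^2 = 195075.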
size_t best_cluster = 0;
for (size_t cluster = 0; cluster < k; ++cluster) {
p = data[point];
m = means[cluster];
double distance = p.color_distance(m);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
assignment = best_cluster;
}
}
assignments[point] = assignment;
}
}
__global__ void cluster_from_blocks(Point *means_cluster_device, Point *means_block_device,
int cluster, int b){
int point = blockIdx.x * blockDim.x + threadIdx.x;
if(point == 0){
//printf("Cluster %d\n", cluster);
Point p = Point(0);
Point m;
//printf("CB b: %d\n", b);
for(int block_id = 0; block_id < b; block_id++){
//printf("CB Block ID: %d\n", block_id);
//printf("CB Block Val: %d\n", means_block_device[block_id]);
m = means_block_device[block_id];
//m.print_point();
p.sum(m);
}
int c = p.count;
p.r = p.r/c;
p.g = p.g/c;
p.b = p.b/c;
p.x = p.x/c;
p.y = p.y/c;
means_cluster_device[cluster] = p;
}
}
__global__ void mask_cluster(Point* data, size_t* assignments, int total_num_points,
int k, int cluster, Point *data_scratch){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < total_num_points){
if(assignments[id]==cluster){data_scratch[id] = data[id];}
else{data_scratch[id] = Point(0);}
//if(data_scratch[id].r > 1)printf("ID: %d, p.r: %d\n", id, data_scratch[id].r);
}
}
__global__ void sum_for_blocks(Point *data_scratch, Point *means_block,
int total_num_points, int NUM_CHUNKS){
int id = blockIdx.x * blockDim.x + threadIdx.x;
//this id between 0 and b = NUM_CHUNKS
if(id < NUM_CHUNKS){
int points_per_chunk = (total_num_points/NUM_CHUNKS) + 1;
int start_point_index = id*points_per_chunk;
int end_point_index = (id+1)*points_per_chunk;
Point p = Point(0);
Point d;
for(int i = start_point_index; (i < end_point_index) && (i < total_num_points); i++){
d = data_scratch[i];
if(d.count == 1) p.sum(d);
}
means_block[id] = p;
//p.print_point();
//printf("SFB Id: %d\n", id);
}
}
__global__ void shared_sum_for_blocks(Point *data_scratch, Point *means_block,
int total_num_points, int bb, int width, int height){
int id = blockIdx.x * blockDim.x + threadIdx.x;
//this id between 0 and b = NUM_CHUNKS
//printf("Shared %d\n", blockIdx.x);
const int b = bb;
__shared__ uint8_t reds[BLOCK_SIDE];
__shared__ uint8_t greens[BLOCK_SIDE];
__shared__ uint8_t blues[BLOCK_SIDE];
__shared__ uint8_t flags[BLOCK_SIDE];
__shared__ uint xs[BLOCK_SIDE];
__shared__ uint ys[BLOCK_SIDE];
if(id < total_num_points){
//uint row = id/width;
//uint col = id%width;
Point p = data_scratch[id];
uint8_t flag;
if(p.count == 1){flag = 1;}
else{flag=0;}
uint8_t red = (uint8_t)p.r;
uint8_t blue = (uint8_t)p.b;
uint8_t green = (uint8_t)p.g;
uint x = (uint)p.x;
uint y = (uint)p.y;
reds[threadIdx.x] = red;
blues[threadIdx.x] = blue;
greens[threadIdx.x] = green;
flags[threadIdx.x] = flag;
xs[threadIdx.x] = x;
ys[threadIdx.x] = y;
}
__syncthreads();
if(id < total_num_points){
//printf("Set block: %d\n", blockIdx.x);
if(threadIdx.x == 1){
uint tr = 0;
uint tg = 0;
uint tb = 0;
uint tx = 0;
uint ty = 0;
uint tc = 0;
for(int i = 0; i < b; i++){
if(flags[i] == 1){
tc+=1;
tr=tr+((uint)reds[i]);
tg=tg+((uint)greens[i]);
tb=tb+((uint)blues[i]);
tx+=xs[i];
ty+=ys[i];
//printf("Shared %d red %d tr %d\n", i, reds[i], tr);
}
}
//printf("%d Red %d\n", blockIdx.x, tr);
Point m = Point(tx,ty,tr,tg,tb,tc);
means_block[blockIdx.x] = m;
//m.print_point();
//printf("SFB Id: %d\n", id);
}
}
}
void update_mean(dim3 gridDim, dim3 threadsPerBlock,
Point *means_block, Point* data, size_t* assignments,
int total_num_points, int k, int cluster, Point *data_scratch, int b, int width, int height){
//mask & keep points only within cluster instead of all k clusters
hipLaunchKernelGGL(( mask_cluster), dim3(gridDim), dim3(threadsPerBlock), 0, 0, data, assignments, total_num_points,
k, cluster, data_scratch);
//printf("wtf");
//sum_for_blocks<<<gridDim, threadsPerBlock>>>(data_scratch, means_block, total_num_points, b);
hipLaunchKernelGGL(( shared_sum_for_blocks), dim3(gridDim), dim3(threadsPerBlock), 0, 0, data_scratch, means_block,
total_num_points, b, width, height);
//printf("xxx");
}
void k_means_main(dim3 gridDim, dim3 threadsPerBlock,
Point* points, Point *means_cluster_device, Point *means_block_device,
size_t* assignments, int number_of_iterations, int k, int b,
int height, int width, uint8_t* rgb_image, uint8_t* new_img, Point *data_scratch){
int total_num_points = width*height;
hipLaunchKernelGGL(( fill_points), dim3(gridDim), dim3(threadsPerBlock), 0, 0, points, height, width, rgb_image);
/*Step 2 from comments*/
for(int i = 0; i< number_of_iterations; i++){
hipLaunchKernelGGL(( set_assignments), dim3(gridDim), dim3(threadsPerBlock), 0, 0, points, assignments,
means_cluster_device, k, total_num_points);
for(int cluster = 0; cluster < k; cluster++){
//printf("UM1\n");
update_mean(gridDim, threadsPerBlock,
means_block_device, points, assignments,
total_num_points, k, cluster, data_scratch, b, width, height);
//printf("UM2\n");
hipLaunchKernelGGL(( cluster_from_blocks), dim3(gridDim), dim3(threadsPerBlock), 0, 0, means_cluster_device,
means_block_device, cluster, b);
//printf("Cluster: %d, Iter: %d\n", cluster, i);
}
}
//at the very end, after accumulating blockwise sums and all, set new image
hipLaunchKernelGGL(( set_new_img), dim3(gridDim), dim3(threadsPerBlock), 0, 0, means_cluster_device, assignments, total_num_points, new_img);
}
/*Init means for all blocks. & for all clusters.
Since we will update means for clusters in sequence
we only need num_blocks number of slots. */
void set_init_means(uint8_t *rgb_image, Point *means_cluster_host, Point *means_block_host,
int k, int b, int width, int height){
unsigned int x, y;
uint8_t r, g, bl;
int factor, init_ind;
for(int cluster_index = 0; cluster_index < k; cluster_index++){
factor = (width*CHANNEL_NUM);
init_ind = CHANNEL_NUM*cluster_index*((height*width)/k);
y = (unsigned int)(init_ind/factor);
x = (unsigned int)(init_ind%factor);
r = rgb_image[init_ind];
g = rgb_image[init_ind+1];
bl = rgb_image[init_ind+2];
means_cluster_host[cluster_index] = Point(x,y,r,g,bl,0);
//means_cluster_host[cluster_index].print_point();
}
for(int block_index = 0; block_index < b; block_index++){
means_block_host[block_index] = Point();
}
}
void k_means(uint8_t* rgb_image, int width, int height,
size_t k, size_t number_of_iterations, int b) {
int total_points = width*height;
int total_cpoints = total_points*CHANNEL_NUM;
//parallelize over pixels kernel dims
// dim3 threadsPerBlock(BLOCK_SIDE, 1, 1);
// const int NUM_BLOCKS_X = (total_points+threadsPerBlock.x-1)/threadsPerBlock.x;
// const int NUM_BLOCKS_Y = 1;
// dim3 gridDim(NUM_BLOCKS_X , NUM_BLOCKS_Y, 1);
// int b = NUM_BLOCKS_X;
//create chunk grid dims
//total_points/NUM_CHUNKS * NUM_CHUNKS + total_points%NUM_CHUNKS = total_points
//printf("TPB: %d, NC: %d\n", NUM_THREADS_XC, gridDimC.x);
const int NUM_THREADS_XC = (total_points/b) + 1;
dim3 threadsPerBlock(NUM_THREADS_XC, 1, 1);
dim3 gridDim(b , 1, 1);
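// Editor's note, illustrative numbers only: for a 640x480 image,
// total_points = 307200; with b = 1024 chunks this gives
// NUM_THREADS_XC = 307200/1024 + 1 = 301, i.e. 1024 blocks * 301 threads
// = 308224 threads, enough to cover every pixel.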
//initialize means before launching kernels since k will typically
//be much smaller than the image size
Point *means_cluster_host = (Point*) malloc(sizeof(Point) * k);
Point *means_block_host = (Point*) malloc(sizeof(Point) * b);
set_init_means(rgb_image, means_cluster_host, means_block_host, k, b, width, height);
//GPU mallocs
Point* means_cluster_device;
Point* means_block_device;
Point* points_device;
Point* data_scratch;
size_t* assignments_device;
uint8_t* new_img_device;
uint8_t* rgb_img_device;
uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * total_cpoints);
hipMalloc(&means_cluster_device, sizeof(Point) * k);
hipMalloc(&means_block_device, sizeof(Point) * b);
hipMalloc(&points_device, sizeof(Point) * total_points);
hipMalloc(&data_scratch, sizeof(Point) * total_points);
hipMalloc(&assignments_device, sizeof(size_t) * total_points);
hipMalloc(&new_img_device, sizeof(uint8_t) * total_cpoints);
hipMalloc(&rgb_img_device, sizeof(uint8_t) * total_cpoints);
//copy from host to GPU
hipMemcpy(rgb_img_device, rgb_image, sizeof(uint8_t) * total_cpoints, hipMemcpyHostToDevice);
hipMemcpy(means_cluster_device, means_cluster_host, sizeof(Point) * k, hipMemcpyHostToDevice);
hipMemcpy(means_block_device, means_block_host, sizeof(Point) * b, hipMemcpyHostToDevice);
//time main computational functions
double start_time_exc = currentSeconds();
k_means_main(gridDim, threadsPerBlock,
points_device, means_cluster_device, means_block_device,
assignments_device, number_of_iterations, k, b,
height, width, rgb_img_device, new_img_device, data_scratch);
double end_time = currentSeconds();
double duration_exc = end_time - start_time_exc;
printf("%f, ", duration_exc);
//copy image back into host from device
hipMemcpy(new_img, new_img_device, sizeof(uint8_t) * total_cpoints, hipMemcpyDeviceToHost);
stbi_write_png("out.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
int main_single(int argc, char **argv){
const char *img_file = argv[1];//"images/cs_test1.jpg";
int NUM_CLUSTERS = atoi(argv[2]);
int NUM_ITERS = atoi(argv[3]);
int NUM_CHUNKS = atoi(argv[4]);
int width, height, bpp;
uint8_t* rgb_image = stbi_load(img_file, &width, &height,
&bpp, CHANNEL_NUM);
k_means(rgb_image, width, height, NUM_CLUSTERS, NUM_ITERS, NUM_CHUNKS);
return 1;
}
int main(int argc, char **argv){
int NUM_IMGS_all = 2;
int NUM_CLUSTERS_all = 3;//atoi(argv[2]);
int NUM_ITERS_all = 2048; //atoi(argv[3]);
int NUM_CHUNKS_all = 1; //atoi(argv[4]);
static const char* imgs[] = {"../../images/small.jpg",
"../../images/medium.jpg", "../../images/large.jpg"};
static const int chunks[] = {BLOCK_SIDE};//{32, 64, 128, 156, 192, 256, 384, 512};
for(int i = 0; i < NUM_IMGS_all; i++){
printf("Image %d\n", i);
for(int j = 0; j < NUM_CHUNKS_all; j++){
int width, height, bpp;
uint8_t* rgb_image = stbi_load(imgs[i], &width, &height,
&bpp, CHANNEL_NUM);
//printf("read");
k_means(rgb_image, width, height, NUM_CLUSTERS_all,
NUM_ITERS_all, chunks[j]);
}
printf("\n");
}
return 1;
}
| 28325cdec497ff0b4809890d3f3d597a4bf9a480.cu | #include <algorithm>
#include <cstdlib>
#include <limits>
#include "cycletimer.h"
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#define STB_IMAGE_IMPLEMENTATION
#include "../../utils/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../../utils/stb_image_write.h"
#define BLOCK_SIDE 1024
#define CHANNEL_NUM 3
#include <unistd.h>
#include <stdio.h>
#include <cstdlib>
typedef unsigned int uint;
struct Point;
struct Point {
unsigned int x, y; // coordinates
unsigned int r, g, b;
int count; // no default cluster
__device__ __host__ Point() :
x(0),
y(0),
r(0),
g(0),
b(0),
count(1) {}
__device__ __host__ Point(int count) :
x(0),
y(0),
r(0),
g(0),
b(0),
count(count) {}
__device__ __host__ Point(unsigned int x, unsigned int y, uint r, uint g, uint b) :
x(x),
y(y),
r(r),
g(g),
b(b),
count(1) {}
__device__ __host__ Point(unsigned int x, unsigned int y, uint r, uint g, uint b, int c) :
x(x),
y(y),
r(r),
g(g),
b(b),
count(c) {}
__device__ double color_distance(Point p){
double v = ((double)((p.r - r) * (p.r - r))) +
((double)((p.g - g) * (p.g - g))) +
((double)((p.b - b) * (p.b - b)));
return v;
}
__inline__ __device__ __host__ void sum(Point p){
x+=p.x;
y+=p.y;
r+=p.r;
b+=p.b;
g+=p.g;
count+=p.count;
}
__inline__ __device__ __host__ void diff(Point p){
x-=p.x;
y-=p.y;
r-=p.r;
b-=p.b;
g-=p.g;
count-=p.count;
}
__device__ __host__ void print_point(){
printf("X: %d, Y: %d\nR: %d, G: %d, B: %d\nCount: %d\n",
x,y,r,g,b,count);
}
};
__global__ void fill_points(Point* points, int height, int width, uint8_t* rgb_image){
int point = blockIdx.x * blockDim.x + threadIdx.x;
int total_num_points = width * height;
unsigned int x, y;
uint8_t r, g, b;
int point_channel = point*CHANNEL_NUM;
if(point<total_num_points){
int factor = (width*CHANNEL_NUM);
y = (unsigned int)(point_channel/factor);
x = (unsigned int)(point_channel%factor);
r = rgb_image[point_channel];
g = rgb_image[point_channel+1];
b = rgb_image[point_channel+2];
points[point] = (Point(x, y, r, g, b));
}
}
__global__ void set_new_img(Point* means, size_t* assignments, int total_num_points, uint8_t* new_img){
int point = blockIdx.x * blockDim.x + threadIdx.x;
Point p;
int c;
if(point<total_num_points)
{
c = assignments[point];
p = means[c]; //use means by cluster
new_img[CHANNEL_NUM*point] = p.r;
new_img[CHANNEL_NUM*point+1] = p.g;
new_img[CHANNEL_NUM*point+2] = p.b;
}
}
__global__ void set_assignments(Point* data, size_t* assignments, Point* means, int k, int total_num_points){
int point = blockIdx.x * blockDim.x + threadIdx.x;
if(point < total_num_points){
int assignment = 0;
Point p,m;
double best_distance = CHANNEL_NUM*256*256;
size_t best_cluster = 0;
for (size_t cluster = 0; cluster < k; ++cluster) {
p = data[point];
m = means[cluster];
double distance = p.color_distance(m);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
assignment = best_cluster;
}
}
assignments[point] = assignment;
}
}
__global__ void cluster_from_blocks(Point *means_cluster_device, Point *means_block_device,
int cluster, int b){
int point = blockIdx.x * blockDim.x + threadIdx.x;
if(point == 0){
//printf("Cluster %d\n", cluster);
Point p = Point(0);
Point m;
//printf("CB b: %d\n", b);
for(int block_id = 0; block_id < b; block_id++){
//printf("CB Block ID: %d\n", block_id);
//printf("CB Block Val: %d\n", means_block_device[block_id]);
m = means_block_device[block_id];
//m.print_point();
p.sum(m);
}
int c = p.count;
p.r = p.r/c;
p.g = p.g/c;
p.b = p.b/c;
p.x = p.x/c;
p.y = p.y/c;
means_cluster_device[cluster] = p;
}
}
__global__ void mask_cluster(Point* data, size_t* assignments, int total_num_points,
int k, int cluster, Point *data_scratch){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < total_num_points){
if(assignments[id]==cluster){data_scratch[id] = data[id];}
else{data_scratch[id] = Point(0);}
//if(data_scratch[id].r > 1)printf("ID: %d, p.r: %d\n", id, data_scratch[id].r);
}
}
__global__ void sum_for_blocks(Point *data_scratch, Point *means_block,
int total_num_points, int NUM_CHUNKS){
int id = blockIdx.x * blockDim.x + threadIdx.x;
//this id between 0 and b = NUM_CHUNKS
if(id < NUM_CHUNKS){
int points_per_chunk = (total_num_points/NUM_CHUNKS) + 1;
int start_point_index = id*points_per_chunk;
int end_point_index = (id+1)*points_per_chunk;
Point p = Point(0);
Point d;
for(int i = start_point_index; (i < end_point_index) && (i < total_num_points); i++){
d = data_scratch[i];
if(d.count == 1) p.sum(d);
}
means_block[id] = p;
//p.print_point();
//printf("SFB Id: %d\n", id);
}
}
__global__ void shared_sum_for_blocks(Point *data_scratch, Point *means_block,
int total_num_points, int bb, int width, int height){
int id = blockIdx.x * blockDim.x + threadIdx.x;
//this id between 0 and b = NUM_CHUNKS
//printf("Shared %d\n", blockIdx.x);
const int b = bb;
__shared__ uint8_t reds[BLOCK_SIDE];
__shared__ uint8_t greens[BLOCK_SIDE];
__shared__ uint8_t blues[BLOCK_SIDE];
__shared__ uint8_t flags[BLOCK_SIDE];
__shared__ uint xs[BLOCK_SIDE];
__shared__ uint ys[BLOCK_SIDE];
if(id < total_num_points){
//uint row = id/width;
//uint col = id%width;
Point p = data_scratch[id];
uint8_t flag;
if(p.count == 1){flag = 1;}
else{flag=0;}
uint8_t red = (uint8_t)p.r;
uint8_t blue = (uint8_t)p.b;
uint8_t green = (uint8_t)p.g;
uint x = (uint)p.x;
uint y = (uint)p.y;
reds[threadIdx.x] = red;
blues[threadIdx.x] = blue;
greens[threadIdx.x] = green;
flags[threadIdx.x] = flag;
xs[threadIdx.x] = x;
ys[threadIdx.x] = y;
}
__syncthreads();
if(id < total_num_points){
//printf("Set block: %d\n", blockIdx.x);
if(threadIdx.x == 1){
uint tr = 0;
uint tg = 0;
uint tb = 0;
uint tx = 0;
uint ty = 0;
uint tc = 0;
for(int i = 0; i < b; i++){
if(flags[i] == 1){
tc+=1;
tr=tr+((uint)reds[i]);
tg=tg+((uint)greens[i]);
tb=tb+((uint)blues[i]);
tx+=xs[i];
ty+=ys[i];
//printf("Shared %d red %d tr %d\n", i, reds[i], tr);
}
}
//printf("%d Red %d\n", blockIdx.x, tr);
Point m = Point(tx,ty,tr,tg,tb,tc);
means_block[blockIdx.x] = m;
//m.print_point();
//printf("SFB Id: %d\n", id);
}
}
}
void update_mean(dim3 gridDim, dim3 threadsPerBlock,
Point *means_block, Point* data, size_t* assignments,
int total_num_points, int k, int cluster, Point *data_scratch, int b, int width, int height){
//mask & keep points only within cluster instead of all k clusters
mask_cluster<<<gridDim, threadsPerBlock>>>(data, assignments, total_num_points,
k, cluster, data_scratch);
//printf("wtf");
//sum_for_blocks<<<gridDim, threadsPerBlock>>>(data_scratch, means_block, total_num_points, b);
shared_sum_for_blocks<<<gridDim, threadsPerBlock>>>(data_scratch, means_block,
total_num_points, b, width, height);
//printf("xxx");
}
void k_means_main(dim3 gridDim, dim3 threadsPerBlock,
Point* points, Point *means_cluster_device, Point *means_block_device,
size_t* assignments, int number_of_iterations, int k, int b,
int height, int width, uint8_t* rgb_image, uint8_t* new_img, Point *data_scratch){
int total_num_points = width*height;
fill_points<<<gridDim, threadsPerBlock>>>(points, height, width, rgb_image);
/*Step 2 from comments*/
for(int i = 0; i< number_of_iterations; i++){
set_assignments<<<gridDim, threadsPerBlock>>>(points, assignments,
means_cluster_device, k, total_num_points);
for(int cluster = 0; cluster < k; cluster++){
//printf("UM1\n");
update_mean(gridDim, threadsPerBlock,
means_block_device, points, assignments,
total_num_points, k, cluster, data_scratch, b, width, height);
//printf("UM2\n");
cluster_from_blocks<<<gridDim, threadsPerBlock>>>(means_cluster_device,
means_block_device, cluster, b);
//printf("Cluster: %d, Iter: %d\n", cluster, i);
}
}
//at the very end, after accumulating blockwise sums and all, set new image
set_new_img<<<gridDim, threadsPerBlock>>>(means_cluster_device, assignments, total_num_points, new_img);
}
/*Init means for all blocks. & for all clusters.
Since we will update means for clusters in sequence
we only need num_blocks number of slots. */
void set_init_means(uint8_t *rgb_image, Point *means_cluster_host, Point *means_block_host,
int k, int b, int width, int height){
unsigned int x, y;
uint8_t r, g, bl;
int factor, init_ind;
for(int cluster_index = 0; cluster_index < k; cluster_index++){
factor = (width*CHANNEL_NUM);
init_ind = CHANNEL_NUM*cluster_index*((height*width)/k);
y = (unsigned int)(init_ind/factor);
x = (unsigned int)(init_ind%factor);
r = rgb_image[init_ind];
g = rgb_image[init_ind+1];
bl = rgb_image[init_ind+2];
means_cluster_host[cluster_index] = Point(x,y,r,g,bl,0);
//means_cluster_host[cluster_index].print_point();
}
for(int block_index = 0; block_index < b; block_index++){
means_block_host[block_index] = Point();
}
}
void k_means(uint8_t* rgb_image, int width, int height,
size_t k, size_t number_of_iterations, int b) {
int total_points = width*height;
int total_cpoints = total_points*CHANNEL_NUM;
//parallelize over pixels kernel dims
// dim3 threadsPerBlock(BLOCK_SIDE, 1, 1);
// const int NUM_BLOCKS_X = (total_points+threadsPerBlock.x-1)/threadsPerBlock.x;
// const int NUM_BLOCKS_Y = 1;
// dim3 gridDim(NUM_BLOCKS_X , NUM_BLOCKS_Y, 1);
// int b = NUM_BLOCKS_X;
//create chunk grid dims
//total_points/NUM_CHUNKS * NUM_CHUNKS + total_points%NUM_CHUNKS = total_points
//printf("TPB: %d, NC: %d\n", NUM_THREADS_XC, gridDimC.x);
const int NUM_THREADS_XC = (total_points/b) + 1;
dim3 threadsPerBlock(NUM_THREADS_XC, 1, 1);
dim3 gridDim(b , 1, 1);
//initialize means before launching kernels since k will typically
//be much smaller than the image size
Point *means_cluster_host = (Point*) malloc(sizeof(Point) * k);
Point *means_block_host = (Point*) malloc(sizeof(Point) * b);
set_init_means(rgb_image, means_cluster_host, means_block_host, k, b, width, height);
//GPU mallocs
Point* means_cluster_device;
Point* means_block_device;
Point* points_device;
Point* data_scratch;
size_t* assignments_device;
uint8_t* new_img_device;
uint8_t* rgb_img_device;
uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * total_cpoints);
cudaMalloc(&means_cluster_device, sizeof(Point) * k);
cudaMalloc(&means_block_device, sizeof(Point) * b);
cudaMalloc(&points_device, sizeof(Point) * total_points);
cudaMalloc(&data_scratch, sizeof(Point) * total_points);
cudaMalloc(&assignments_device, sizeof(size_t) * total_points);
cudaMalloc(&new_img_device, sizeof(uint8_t) * total_cpoints);
cudaMalloc(&rgb_img_device, sizeof(uint8_t) * total_cpoints);
//copy from host to GPU
cudaMemcpy(rgb_img_device, rgb_image, sizeof(uint8_t) * total_cpoints, cudaMemcpyHostToDevice);
cudaMemcpy(means_cluster_device, means_cluster_host, sizeof(Point) * k, cudaMemcpyHostToDevice);
cudaMemcpy(means_block_device, means_block_host, sizeof(Point) * b, cudaMemcpyHostToDevice);
//time main computational functions
double start_time_exc = currentSeconds();
k_means_main(gridDim, threadsPerBlock,
points_device, means_cluster_device, means_block_device,
assignments_device, number_of_iterations, k, b,
height, width, rgb_img_device, new_img_device, data_scratch);
double end_time = currentSeconds();
double duration_exc = end_time - start_time_exc;
printf("%f, ", duration_exc);
//copy image back into host from device
cudaMemcpy(new_img, new_img_device, sizeof(uint8_t) * total_cpoints, cudaMemcpyDeviceToHost);
stbi_write_png("out.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
int main_single(int argc, char **argv){
const char *img_file = argv[1];//"images/cs_test1.jpg";
int NUM_CLUSTERS = atoi(argv[2]);
int NUM_ITERS = atoi(argv[3]);
int NUM_CHUNKS = atoi(argv[4]);
int width, height, bpp;
uint8_t* rgb_image = stbi_load(img_file, &width, &height,
&bpp, CHANNEL_NUM);
k_means(rgb_image, width, height, NUM_CLUSTERS, NUM_ITERS, NUM_CHUNKS);
return 1;
}
int main(int argc, char **argv){
int NUM_IMGS_all = 2;
int NUM_CLUSTERS_all = 3;//atoi(argv[2]);
int NUM_ITERS_all = 2048; //atoi(argv[3]);
int NUM_CHUNKS_all = 1; //atoi(argv[4]);
static const char* imgs[] = {"../../images/small.jpg",
"../../images/medium.jpg", "../../images/large.jpg"};
static const int chunks[] = {BLOCK_SIDE};//{32, 64, 128, 156, 192, 256, 384, 512};
for(int i = 0; i < NUM_IMGS_all; i++){
printf("Image %d\n", i);
for(int j = 0; j < NUM_CHUNKS_all; j++){
int width, height, bpp;
uint8_t* rgb_image = stbi_load(imgs[i], &width, &height,
&bpp, CHANNEL_NUM);
//printf("read");
k_means(rgb_image, width, height, NUM_CLUSTERS_all,
NUM_ITERS_all, chunks[j]);
}
printf("\n");
}
return 1;
}
|
simpleVote_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef SIMPLEVOTE_KERNEL_CU
#define SIMPLEVOTE_KERNEL_CU
////////////////////////////////////////////////////////////////////////////////
// Vote Any/All intrinsic kernel function tests are supported only by
// CUDA-capable devices with SM 1.2 or later
// Vote Functions (refer to section 4.4.5 in the CUDA Programming Guide)
////////////////////////////////////////////////////////////////////////////////
// Kernel #1 tests the across-the-warp vote(any) intrinsic.
// If ANY one of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAnyKernel1(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = any(input[tx]);
}
// Kernel #2 tests the across-the-warp vote(all) intrinsic.
// If ALL of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAllKernel2(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = all(input[tx]);
}
// Kernel #3 is a directed test for the across-the-warp vote(all) intrinsic.
// This kernel will test for conditions across warps, and within half warps
__global__ void VoteAnyKernel3(bool *info, int warp_size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
bool *offs = info + (tx * 3);
// The following should hold true for the second and third warp
*offs = any((tx >= (warp_size * 3) / 2));
// The following should hold true for the "upper half" of the second warp,
// and all of the third warp
*(offs + 1) = (tx >= (warp_size * 3) / 2? true: false);
// The following should hold true for the third warp only
if(all((tx >= (warp_size * 3) / 2))) {
*(offs + 2) = true;
}
}
#endif
| simpleVote_kernel.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef SIMPLEVOTE_KERNEL_CU
#define SIMPLEVOTE_KERNEL_CU
////////////////////////////////////////////////////////////////////////////////
// Vote Any/All intrinsic kernel function tests are supported only by
// CUDA-capable devices with SM 1.2 or later
// Vote Functions (refer to section 4.4.5 in the CUDA Programming Guide)
////////////////////////////////////////////////////////////////////////////////
// Kernel #1 tests the across-the-warp vote(any) intrinsic.
// If ANY one of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAnyKernel1(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = any(input[tx]);
}
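// Editor's note -- illustrative behaviour, not part of the original sample:
// with a 32-thread warp where only input[5] is non-zero, any() makes every
// result[0..31] in that warp non-zero, whereas all() (Kernel #2 below) would
// leave them all zero.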
// Kernel #2 tests the across-the-warp vote(all) intrinsic.
// If ALL of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAllKernel2(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = all(input[tx]);
}
// Kernel #3 is a directed test for the across-the-warp vote(all) intrinsic.
// This kernel will test for conditions across warps, and within half warps
__global__ void VoteAnyKernel3(bool *info, int warp_size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
bool *offs = info + (tx * 3);
// The following should hold true for the second and third warp
*offs = any((tx >= (warp_size * 3) / 2));
// The following should hold true for the "upper half" of the second warp,
// and all of the third warp
*(offs + 1) = (tx >= (warp_size * 3) / 2? true: false);
// The following should hold true for the third warp only
if(all((tx >= (warp_size * 3) / 2))) {
*(offs + 2) = true;
}
}
#endif
|
simpleVote_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef SIMPLEVOTE_KERNEL_CU
#define SIMPLEVOTE_KERNEL_CU
////////////////////////////////////////////////////////////////////////////////
// Vote Any/All intrinsic kernel function tests are supported only by
// CUDA-capable devices with SM 1.2 or later
// Vote Functions (refer to section 4.4.5 in the CUDA Programming Guide)
////////////////////////////////////////////////////////////////////////////////
// Kernel #1 tests the across-the-warp vote(any) intrinsic.
// If ANY one of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAnyKernel1(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = any(input[tx]);
}
// Kernel #2 tests the across-the-warp vote(all) intrinsic.
// If ALL of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAllKernel2(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
result[tx] = all(input[tx]);
}
// Kernel #3 is a directed test for the across-the-warp vote(all) intrinsic.
// This kernel will test for conditions across warps, and within half warps
__global__ void VoteAnyKernel3(bool *info, int warp_size)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
bool *offs = info + (tx * 3);
// The following should hold true for the second and third warp
*offs = any((tx >= (warp_size * 3) / 2));
// The following should hold true for the "upper half" of the second warp,
// and all of the third warp
*(offs + 1) = (tx >= (warp_size * 3) / 2? true: false);
// The following should hold true for the third warp only
if(all((tx >= (warp_size * 3) / 2))) {
*(offs + 2) = true;
}
}
#endif
| simpleVote_kernel.cuh | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef SIMPLEVOTE_KERNEL_CU
#define SIMPLEVOTE_KERNEL_CU
////////////////////////////////////////////////////////////////////////////////
// Vote Any/All intrinsic kernel function tests are supported only by
// CUDA-capable devices with SM 1.2 or later
// Vote Functions (refer to section 4.4.5 in the CUDA Programming Guide)
////////////////////////////////////////////////////////////////////////////////
// Kernel #1 tests the across-the-warp vote(any) intrinsic.
// If ANY one of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAnyKernel1(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x;
result[tx] = any(input[tx]);
}
// Kernel #2 tests the across-the-warp vote(all) intrinsic.
// If ALL of the threads (within the warp) of the predicated condition returns
// a non-zero value, then all threads within this warp will return a non-zero value
__global__ void VoteAllKernel2(unsigned int *input, unsigned int *result, int size)
{
int tx = threadIdx.x;
result[tx] = all(input[tx]);
}
// Kernel #3 is a directed test for the across-the-warp vote(all) intrinsic.
// This kernel will test for conditions across warps, and within half warps
__global__ void VoteAnyKernel3(bool *info, int warp_size)
{
int tx = threadIdx.x;
bool *offs = info + (tx * 3);
// The following should hold true for the second and third warp
*offs = any((tx >= (warp_size * 3) / 2));
// The following should hold true for the "upper half" of the second warp,
// and all of the third warp
*(offs + 1) = (tx >= (warp_size * 3) / 2? true: false);
// The following should hold true for the third warp only
if (all((tx >= (warp_size * 3) / 2)))
{
*(offs + 2) = true;
}
}
#endif
|
fe3fa5506d9663f96c50fd32cb2d894c0e4bc910.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _OPTIMISATION_KERNEL_Z_H_
#define _OPTIMISATION_KERNEL_Z_H_
#include "GpGpu/GpGpu_StreamData.cuh"
#include "GpGpu/SData2Optimize.h"
// One could imagine a buffer of sizes computed in parallel:
// SIZEBUFFER[threadIdx.x] = count(lI[threadIdx.x]);
__device__ void GetConeZ(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
aDz.x = aZ_Prev.x-aZ;
if (aZ != aZ_Next.x)
aDz.x = max(aDz.x,-MaxDeltaZ);
aDz.y = aZ_Prev.y-1-aZ;
if (aZ != aZ_Next.y-1)
aDz.y = min(aDz.y,MaxDeltaZ);
if (aDz.x > aDz.y)
if (aDz.y <0)
aDz.x = aDz.y;
else
aDz.y = aDz.x;
}
__device__ void BasicComputeIntervaleDelta
(
short2 & aDz,
int aZ,
int MaxDeltaZ,
short2 aZ_Prev
)
{
aDz.x = max(-MaxDeltaZ,aZ_Prev.x-aZ);
aDz.y = min(MaxDeltaZ,aZ_Prev.y-1-aZ);
}
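// Editor's note, worked example: with MaxDeltaZ = 2, aZ = 10 and
// aZ_Prev = (7, 14), aDz = (max(-2, 7-10), min(2, 14-1-10)) = (-2, 2).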
inline __device__ uint minR(uint *sMin, uint &globalMin){ // TODO: careful, inline was added
ushort thread2;
uint temp;
//
int nTotalThreads = WARPSIZE; // Total number of threads, rounded up to the next power of two
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint)
{
thread2 = threadIdx.x + halfPoint;
// Skipping the fictitious threads blockDim.x ... blockDim_2-1
if (thread2 < blockDim.x)
{
// Get the shared value stored by another thread
temp = sMin[thread2];
if (temp < sMin[threadIdx.x])
sMin[threadIdx.x] = temp;
}
}
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
const uint minus = sMin[0];
if(minus < globalMin) globalMin = minus;
return minus;
}
template<bool sens> __device__
inline uint __choose(uint kav,uint kar)
{
return 0;
}
template<> __device__
inline uint __choose<true>(uint kav,uint kar)
{
return kav;
}
template<> __device__
inline uint __choose<false>(uint kav,uint kar)
{
return kar;
}
template<bool sens> __device__
inline ushort __choose(ushort kav,ushort kar)
{
return 0;
}
template<> __device__
inline ushort __choose<true>(ushort kav,ushort kar)
{
return kav;
}
template<> __device__
inline ushort __choose<false>(ushort kav,ushort kar)
{
return kar;
}
template<bool sens> __device__
inline short __choose(short kav,short kar)
{
return 0;
}
template<> __device__
inline short __choose<true>(short kav,short kar)
{
return kav;
}
template<> __device__
inline short __choose<false>(short kav,short kar)
{
return kar;
}
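// Editor's note: __choose<true> keeps the forward ("avant") argument and
// __choose<false> the backward ("arriere") one, so the scan direction is
// resolved at compile time instead of branching per thread.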
template<bool autoMask> __device__
inline void getIntervale(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev){}
template<> __device__
inline void getIntervale<true>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
BasicComputeIntervaleDelta(aDz,aZ,MaxDeltaZ,aZ_Prev);
}
template<> __device__
inline void getIntervale<false>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
GetConeZ(aDz,aZ,MaxDeltaZ,aZ_Next,aZ_Prev);
}
template<bool autoMask> __device__
inline uint getCostInit(uint maskCost,uint costInit,bool mask){return 0;}
template<> __device__
inline uint getCostInit<true>(uint maskCost,uint costInit,bool mask)
{
return mask ? maskCost : costInit;
}
template<> __device__
inline uint getCostInit<false>(uint maskCost,uint costInit,bool mask)
{
return costInit;
}
template<bool autoMask> __device__
inline void connectMask(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask){}
template<> __device__
inline void connectMask<true>(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask)
{
if(!mask)
costMin = min(costMin, costInit + prevDefCor + costTransDefMask );
}
template<bool sens> __device__
inline short __delta()
{
return 0;
}
template<> __device__
inline short __delta<true>()
{
return 0;
}
template<> __device__
inline short __delta<false>()
{
return -WARPSIZE + 1;
}
template<bool sens> __device__
inline void __autoMask(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
// uint defCor = prevDefCor + cDefCor;
// if(p.prevDefCor != 0)
// defCor = min(defCor,cDefCor + prevMinCostCells + p.costTransDefMask);
// prevDefCor = defCor - prevMinCost;
if(p.prevDefCor != 0)
prevDefCor = cDefCor - prevMinCost + min(prevDefCor,prevMinCostCells + p.costTransDefMask);
else
prevDefCor = cDefCor - prevMinCost + prevDefCor;
prevMinCostCells = globMinFCost;
prevMinCost = min(globMinFCost,prevDefCor);
p.prevDefCor = cDefCor;
if(p.tid == 0)
{
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline , p.line.lenght - idGline),prevDefCor,prevDefCor-cDefCor);
}
}
template<bool sens,bool hasMask> __device__
inline void autoMask(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
prevMinCost = globMinFCost;
}
template<> __device__
inline void autoMask<true,true>(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
__autoMask<true>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
}
template<> __device__
inline void autoMask<false,true>(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
__autoMask<false>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
}
template<bool sens,bool hasMask> __device__
void connectCellsLine(
SimpleStream<short3> &streamIndex,
SimpleStream<uint> &streamFCost,
SimpleStream<ushort> &streamICost,
SimpleStream<uint> &streamDefCor,
short3 *S_Bf_Index,
ushort *ST_Bf_ICost,
uint *S_FCost[2],
p_ReadLine &p
)
{
short3* ST_Bf_Index = S_Bf_Index + p.tid + __delta<sens>();
__shared__ uint minCost[WARPSIZE];
short2 ConeZ;
uint globMinFCost;
bool lined = p.line.id < p.line.lenght;
const int regulZ = (int)((float)10000.f*p.ZRegul);
// Note
// p.seg.id = 1 on the first pass, since the init costs are simply copied
//////////////////////////////////////////////////
/// TODO: what should prevDefCor be: p.costTransDefMask + p.costDefMask or p.costDefMask?
/////////////////////////////////////////////////
uint prevDefCor =/* p.costTransDefMask + */p.prevDefCor; // TODO: check which value to use here
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline, p.line.lenght - idGline),prevDefCor);
uint prevMinCostCells = 0; // TODO: this value still needs to be determined
uint prevMinCost = 0;
while(lined)
{
while(p.seg.id < p.seg.lenght)
{
const short3 dTer = S_Bf_Index[sgn(p.seg.id)];
const short2 indexZ = make_short2(dTer.x,dTer.y);
const ushort cDefCor = dTer.z;
const bool maskTer = cDefCor == 0;
const ushort dZ = count(indexZ); // create a buffer of counts
ushort z = 0;
globMinFCost = max_cost;
while( z < dZ)
{
// Read from the stream if the buffer is empty | TODO: check whether > or >=
if(p.ID_Bf_Icost >= p.sizeBuffer)
{
streamICost.read<sens>(ST_Bf_ICost); // Read the correlation costs
streamFCost.incre<sens>(); // Point to the output
p.ID_Bf_Icost = 0; // Point to the first value of the correlation-cost buffer
}
uint fCostMin = max_cost;
uint costInit = getCostInit<hasMask>(500000,ST_Bf_ICost[sgn(p.ID_Bf_Icost)],maskTer);
const ushort tZ = z + p.stid<sens>();
const short Z = __choose<sens>((short)(tZ + indexZ.x),(short)(indexZ.y - tZ - 1));
const short pitPrZ = __choose<sens>((short)(Z - p.prev_Dz.x ), (short)(p.prev_Dz.y - Z - 1));
getIntervale<hasMask>(ConeZ,Z,p.pente,indexZ,p.prev_Dz);
const uint* prevFCost = S_FCost[p.Id_Buf] + sgn(pitPrZ);
ConeZ.y = min(p.sizeBuffer - pitPrZ,ConeZ.y );
for (short i = ConeZ.x; i <= ConeZ.y; ++i) //--> TODO: this step is not necessary when we are outside the Ter (terrain) mask
fCostMin = min(fCostMin, costInit + prevFCost[i] + abs((int)i)*regulZ);
connectMask<hasMask>(fCostMin,costInit,prevDefCor,p.costTransDefMask,maskTer);
if(tZ < dZ && p.ID_Bf_Icost + p.stid<sens>() < p.sizeBuffer && tZ < p.sizeBuffer)
{
fCostMin -= prevMinCost;
minCost[p.tid] = fCostMin;
S_FCost[!p.Id_Buf][sgn(tZ)] = fCostMin;
streamFCost.SetOrAddValue<sens>(sgn(p.ID_Bf_Icost),fCostMin,fCostMin - costInit);
}
else
minCost[p.tid] = max_cost;
minR(minCost,globMinFCost); // TODO: check this function, it may run too many times; careful, kept inline for now
const ushort pIdCost = p.ID_Bf_Icost;
p.ID_Bf_Icost += min(dZ - z , WARPSIZE);
z += min(p.sizeBuffer-pIdCost , WARPSIZE);
}
autoMask<sens,hasMask>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
p.prev_Dz = indexZ;
p.seg.id++;
p.swBuf();
}
p.line.id += p.seg.lenght;
lined = p.line.id < p.line.lenght;
if(lined)
{
streamIndex.read<sens>(ST_Bf_Index);
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
p.seg.id = 0; // position within the segment of the Z index stream
}
}
}
// TODO: pass the parameters as constant variables
template<class T,bool hasMask> __global__
void Kernel_OptimisationOneDirection(ushort* g_ICost, short3* g_Index, uint* g_FCost, uint* g_DefCor, uint3* g_RecStrParam, ushort penteMax, float zReg,float zRegQuad, ushort costDefMask,ushort costTransDefMask,ushort sizeBuffer,bool hasMaskauto)
{
extern __shared__ float sharedMemory[];
ushort* S_BuffICost0 = (ushort*) sharedMemory;
uint* S_BuffFCost0 = (uint*) &S_BuffICost0[sizeBuffer + 2*WARPSIZE];
uint* S_BuffFCost1 = (uint*) &S_BuffFCost0[sizeBuffer + 2*WARPSIZE];
short3* S_BuffIndex = (short3*) &S_BuffFCost1[sizeBuffer + 2*WARPSIZE];
uint* pit_Id = (uint*) &S_BuffIndex[WARPSIZE];
uint* pit_Stream = pit_Id + 1;
p_ReadLine p(threadIdx.x,penteMax,zReg,zRegQuad,costDefMask,costTransDefMask,sizeBuffer,hasMaskauto);
uint* S_BuffFCost[2] = {S_BuffFCost0 + WARPSIZE,S_BuffFCost1 + WARPSIZE};
ushort* S_BuffICost = S_BuffICost0 + WARPSIZE + p.tid;
if(!threadIdx.x)
{
*pit_Stream = g_RecStrParam[blockIdx.x].x;
*pit_Id = g_RecStrParam[blockIdx.x].y;
}
__syncthreads();
p.line.lenght = g_RecStrParam[blockIdx.x].z;
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
SimpleStream<ushort> streamICost( g_ICost + *pit_Stream ,sizeBuffer);
SimpleStream<uint> streamFCost( g_FCost + *pit_Stream ,sizeBuffer);
SimpleStream<short3> streamIndex( g_Index + *pit_Id ,WARPSIZE);
SimpleStream<uint> streamDefCor( g_DefCor + *pit_Id ,WARPSIZE);
if(p.tid == 0)
streamDefCor.SetValue(0,0); // because the first line is not computed
// Caution: check this for the backward pass
streamICost.read<eAVANT>(S_BuffICost);
streamIndex.read<eAVANT>(S_BuffIndex + p.tid);
p.prev_Dz = make_short2(S_BuffIndex[0].x,S_BuffIndex[0].y);
p.prevDefCor = S_BuffIndex[0].z;
p.ID_Bf_Icost = count(p.prev_Dz);
for (ushort i = 0; i < p.ID_Bf_Icost - p.tid; i+=WARPSIZE)
{
S_BuffFCost[p.Id_Buf][i + p.tid] = S_BuffICost[i];
streamFCost.SetValue(i,S_BuffICost[i]);
}
connectCellsLine<eAVANT,hasMask>(streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex,S_BuffICost,S_BuffFCost,p);
streamIndex.ReverseIncre<eARRIERE>();
streamFCost.incre<eAVANT>();
streamFCost.reverse<eARRIERE>();
S_BuffFCost[0] += sizeBuffer;
S_BuffFCost[1] += sizeBuffer;
S_BuffICost += sizeBuffer - WARPSIZE;
streamICost.readFrom<eARRIERE>(S_BuffFCost[p.Id_Buf] + p.tid, sizeBuffer - p.ID_Bf_Icost);
streamICost.ReverseIncre<eARRIERE>();
p.reverse(S_BuffIndex,sizeBuffer);
if(p.ID_Bf_Icost > sizeBuffer)
{
p.ID_Bf_Icost -= sizeBuffer;
streamICost.read<eARRIERE>(S_BuffICost);
streamFCost.incre<eARRIERE>();
}
uint* locFCost = S_BuffFCost[p.Id_Buf] - p.stid<eARRIERE>();
for (ushort i = 0; i < sizeBuffer; i+=WARPSIZE)
locFCost[-i] = S_BuffICost[-i];
connectCellsLine<eARRIERE,hasMask>( streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex + WARPSIZE - 1,S_BuffICost,S_BuffFCost,p);
}
extern "C" void Gpu_OptimisationOneDirection(Data2Optimiz<CuDeviceData3D> &d2O)
{
ushort deltaMax = d2O.penteMax();
float zReg = (float)d2O.zReg();
float zRegQuad = d2O.zRegQuad();
ushort costDefMask = d2O.CostDefMasked();
ushort costTransDefMask = d2O.CostTransMaskNoMask();
bool hasMaskauto = d2O.hasMaskAuto();
dim3 Threads(WARPSIZE,1,1);
dim3 Blocks(d2O.NBlines(),1,1);
ushort sizeBuff = min(d2O.DzMax(),4096); //NAPPEMAX;
ushort cacheLin = sizeBuff + 2 * WARPSIZE;
// Compute the dynamic shared-memory allocation
uint sizeSharedMemory =
cacheLin * sizeof(ushort) + // S_BuffICost0
cacheLin * sizeof(uint) + // S_BuffFCost0
cacheLin * sizeof(uint) + // S_BuffFCost1
WARPSIZE * sizeof(short3) + // S_BuffIndex
// WARPSIZE * sizeof(uint) + // S_BuffDefCor
sizeof(uint) + // pit_Id
sizeof(uint); // pit_Stream
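// [Added note, not in the original source] Assuming WARPSIZE == 32 and the maximum
// sizeBuff of 4096, cacheLin is 4160 and the request is roughly
// 4160*2 + 4160*4 + 4160*4 + 32*6 + 4 + 4 = 41800 bytes, which fits within the
// default 48 KB of dynamic shared memory per block.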
if(hasMaskauto)
hipLaunchKernelGGL(( Kernel_OptimisationOneDirection< uint,true >), dim3(Blocks),dim3(Threads),sizeSharedMemory, 0,
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
else
hipLaunchKernelGGL(( Kernel_OptimisationOneDirection< uint,false >), dim3(Blocks),dim3(Threads),sizeSharedMemory, 0,
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
printf("Error CUDA Gpu_OptimisationOneDirection\n");
printf("%s",hipGetErrorString(err));
DUMP(d2O.NBlines());
DUMP(sizeSharedMemory);
DUMP(d2O.DzMax());
}
getLastCudaError("TestkernelOptiOneDirection failed");
}
#endif //_OPTIMISATION_KERNEL_Z_H_
| fe3fa5506d9663f96c50fd32cb2d894c0e4bc910.cu | #ifndef _OPTIMISATION_KERNEL_Z_H_
#define _OPTIMISATION_KERNEL_Z_H_
#include "GpGpu/GpGpu_StreamData.cuh"
#include "GpGpu/SData2Optimize.h"
// One could imagine computing a buffer of sizes in parallel
// SIZEBUFFER[threadIdx.x] = count(lI[threadIdx.x]);
__device__ void GetConeZ(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
aDz.x = aZ_Prev.x-aZ;
if (aZ != aZ_Next.x)
aDz.x = max(aDz.x,-MaxDeltaZ);
aDz.y = aZ_Prev.y-1-aZ;
if (aZ != aZ_Next.y-1)
aDz.y = min(aDz.y,MaxDeltaZ);
if (aDz.x > aDz.y)
if (aDz.y <0)
aDz.x = aDz.y;
else
aDz.y = aDz.x;
}
__device__ void BasicComputeIntervaleDelta
(
short2 & aDz,
int aZ,
int MaxDeltaZ,
short2 aZ_Prev
)
{
aDz.x = max(-MaxDeltaZ,aZ_Prev.x-aZ);
aDz.y = min(MaxDeltaZ,aZ_Prev.y-1-aZ);
}
inline __device__ uint minR(uint *sMin, uint &globalMin){ // TODO: caution, inline added
ushort thread2;
uint temp;
//
int nTotalThreads = WARPSIZE; // Total number of threads, rounded up to the next power of two
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint)
{
thread2 = threadIdx.x + halfPoint;
// Skipping the fictitious threads blockDim.x ... blockDim_2-1
if (thread2 < blockDim.x)
{
// Get the shared value stored by another thread
temp = sMin[thread2];
if (temp < sMin[threadIdx.x])
sMin[threadIdx.x] = temp;
}
}
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
const uint minus = sMin[0];
if(minus < globalMin) globalMin = minus;
return minus;
}
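// [Added note, not in the original source] minR above is a shared-memory tree
// reduction across the warp. For illustration only, an equivalent warp-level min
// reduction can be written with shuffle intrinsics; this sketch assumes
// WARPSIZE == 32, full-warp participation, and CUDA 9+ for __shfl_down_sync,
// and it is not called by the code below:
__device__ inline uint warpMinSketch(uint val)
{
for (int offset = WARPSIZE / 2; offset > 0; offset >>= 1)
val = min(val, __shfl_down_sync(0xffffffffu, val, offset));
return val; // lane 0 ends up holding the warp-wide minimum
}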
template<bool sens> __device__
inline uint __choose(uint kav,uint kar)
{
return 0;
}
template<> __device__
inline uint __choose<true>(uint kav,uint kar)
{
return kav;
}
template<> __device__
inline uint __choose<false>(uint kav,uint kar)
{
return kar;
}
template<bool sens> __device__
inline ushort __choose(ushort kav,ushort kar)
{
return 0;
}
template<> __device__
inline ushort __choose<true>(ushort kav,ushort kar)
{
return kav;
}
template<> __device__
inline ushort __choose<false>(ushort kav,ushort kar)
{
return kar;
}
template<bool sens> __device__
inline short __choose(short kav,short kar)
{
return 0;
}
template<> __device__
inline short __choose<true>(short kav,short kar)
{
return kav;
}
template<> __device__
inline short __choose<false>(short kav,short kar)
{
return kar;
}
template<bool autoMask> __device__
inline void getIntervale(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev){}
template<> __device__
inline void getIntervale<true>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
BasicComputeIntervaleDelta(aDz,aZ,MaxDeltaZ,aZ_Prev);
}
template<> __device__
inline void getIntervale<false>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
GetConeZ(aDz,aZ,MaxDeltaZ,aZ_Next,aZ_Prev);
}
template<bool autoMask> __device__
inline uint getCostInit(uint maskCost,uint costInit,bool mask){return 0;}
template<> __device__
inline uint getCostInit<true>(uint maskCost,uint costInit,bool mask)
{
return mask ? maskCost : costInit;
}
template<> __device__
inline uint getCostInit<false>(uint maskCost,uint costInit,bool mask)
{
return costInit;
}
template<bool autoMask> __device__
inline void connectMask(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask){}
template<> __device__
inline void connectMask<true>(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask)
{
if(!mask)
costMin = min(costMin, costInit + prevDefCor + costTransDefMask );
}
template<bool sens> __device__
inline short __delta()
{
return 0;
}
template<> __device__
inline short __delta<true>()
{
return 0;
}
template<> __device__
inline short __delta<false>()
{
return -WARPSIZE + 1;
}
template<bool sens> __device__
inline void __autoMask(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
// uint defCor = prevDefCor + cDefCor;
// if(p.prevDefCor != 0)
// defCor = min(defCor,cDefCor + prevMinCostCells + p.costTransDefMask);
// prevDefCor = defCor - prevMinCost;
if(p.prevDefCor != 0)
prevDefCor = cDefCor - prevMinCost + min(prevDefCor,prevMinCostCells + p.costTransDefMask);
else
prevDefCor = cDefCor - prevMinCost + prevDefCor;
prevMinCostCells = globMinFCost;
prevMinCost = min(globMinFCost,prevDefCor);
p.prevDefCor = cDefCor;
if(p.tid == 0)
{
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline , p.line.lenght - idGline),prevDefCor,prevDefCor-cDefCor);
}
}
template<bool sens,bool hasMask> __device__
inline void autoMask(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
prevMinCost = globMinFCost;
}
template<> __device__
inline void autoMask<true,true>(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
__autoMask<true>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
}
template<> __device__
inline void autoMask<false,true>(uint &prevDefCor,const ushort &cDefCor,uint &prevMinCost,uint &prevMinCostCells, const uint &globMinFCost,p_ReadLine &p,SimpleStream<uint> &streamDefCor)
{
__autoMask<false>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
}
template<bool sens,bool hasMask> __device__
void connectCellsLine(
SimpleStream<short3> &streamIndex,
SimpleStream<uint> &streamFCost,
SimpleStream<ushort> &streamICost,
SimpleStream<uint> &streamDefCor,
short3 *S_Bf_Index,
ushort *ST_Bf_ICost,
uint *S_FCost[2],
p_ReadLine &p
)
{
short3* ST_Bf_Index = S_Bf_Index + p.tid + __delta<sens>();
__shared__ uint minCost[WARPSIZE];
short2 ConeZ;
uint globMinFCost;
bool lined = p.line.id < p.line.lenght;
const int regulZ = (int)((float)10000.f*p.ZRegul);
// Note
// p.seg.id = 1 on the first pass, since the init costs are simply copied
//////////////////////////////////////////////////
/// TODO!!!! : what should prevDefCor be, p.costTransDefMask + p.costDefMask or p.costDefMask
/////////////////////////////////////////////////
uint prevDefCor =/* p.costTransDefMask + */p.prevDefCor; // TODO: check which value to use!!!
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline, p.line.lenght - idGline),prevDefCor);
uint prevMinCostCells = 0; // TODO: this value still needs to be determined
uint prevMinCost = 0;
while(lined)
{
while(p.seg.id < p.seg.lenght)
{
const short3 dTer = S_Bf_Index[sgn(p.seg.id)];
const short2 indexZ = make_short2(dTer.x,dTer.y);
const ushort cDefCor = dTer.z;
const bool maskTer = cDefCor == 0;
const ushort dZ = count(indexZ); // create a count buffer
ushort z = 0;
globMinFCost = max_cost;
while( z < dZ)
{
// Read the stream if the buffer is empty | TODO CHECK whether > or >=
if(p.ID_Bf_Icost >= p.sizeBuffer)
{
streamICost.read<sens>(ST_Bf_ICost); // Read the correlation costs
streamFCost.incre<sens>(); // Point to the output
p.ID_Bf_Icost = 0; // Point to the first value of the correlation-cost buffer
}
uint fCostMin = max_cost;
uint costInit = getCostInit<hasMask>(500000,ST_Bf_ICost[sgn(p.ID_Bf_Icost)],maskTer);
const ushort tZ = z + p.stid<sens>();
const short Z = __choose<sens>((short)(tZ + indexZ.x),(short)(indexZ.y - tZ - 1));
const short pitPrZ = __choose<sens>((short)(Z - p.prev_Dz.x ), (short)(p.prev_Dz.y - Z - 1));
getIntervale<hasMask>(ConeZ,Z,p.pente,indexZ,p.prev_Dz);
const uint* prevFCost = S_FCost[p.Id_Buf] + sgn(pitPrZ);
ConeZ.y = min(p.sizeBuffer - pitPrZ,ConeZ.y );
for (short i = ConeZ.x; i <= ConeZ.y; ++i) //--> TODO: this step is not necessary when we are outside the terrain mask
fCostMin = min(fCostMin, costInit + prevFCost[i] + abs((int)i)*regulZ);
connectMask<hasMask>(fCostMin,costInit,prevDefCor,p.costTransDefMask,maskTer);
if(tZ < dZ && p.ID_Bf_Icost + p.stid<sens>() < p.sizeBuffer && tZ < p.sizeBuffer)
{
fCostMin -= prevMinCost;
minCost[p.tid] = fCostMin;
S_FCost[!p.Id_Buf][sgn(tZ)] = fCostMin;
streamFCost.SetOrAddValue<sens>(sgn(p.ID_Bf_Icost),fCostMin,fCostMin - costInit);
}
else
minCost[p.tid] = max_cost;
minR(minCost,globMinFCost); // TODO: check this function, it may be launched too many times..... Caution, inlined for now
const ushort pIdCost = p.ID_Bf_Icost;
p.ID_Bf_Icost += min(dZ - z , WARPSIZE);
z += min(p.sizeBuffer-pIdCost , WARPSIZE);
}
autoMask<sens,hasMask>(prevDefCor,cDefCor,prevMinCost,prevMinCostCells, globMinFCost,p,streamDefCor);
p.prev_Dz = indexZ;
p.seg.id++;
p.swBuf();
}
p.line.id += p.seg.lenght;
lined = p.line.id < p.line.lenght;
if(lined)
{
streamIndex.read<sens>(ST_Bf_Index);
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
p.seg.id = 0; // position within the segment of the Z index stream
}
}
}
// TODO: pass the parameters as constant variables !!!!!!!!!!!
template<class T,bool hasMask> __global__
void Kernel_OptimisationOneDirection(ushort* g_ICost, short3* g_Index, uint* g_FCost, uint* g_DefCor, uint3* g_RecStrParam, ushort penteMax, float zReg,float zRegQuad, ushort costDefMask,ushort costTransDefMask,ushort sizeBuffer,bool hasMaskauto)
{
extern __shared__ float sharedMemory[];
ushort* S_BuffICost0 = (ushort*) sharedMemory;
uint* S_BuffFCost0 = (uint*) &S_BuffICost0[sizeBuffer + 2*WARPSIZE];
uint* S_BuffFCost1 = (uint*) &S_BuffFCost0[sizeBuffer + 2*WARPSIZE];
short3* S_BuffIndex = (short3*) &S_BuffFCost1[sizeBuffer + 2*WARPSIZE];
uint* pit_Id = (uint*) &S_BuffIndex[WARPSIZE];
uint* pit_Stream = pit_Id + 1;
p_ReadLine p(threadIdx.x,penteMax,zReg,zRegQuad,costDefMask,costTransDefMask,sizeBuffer,hasMaskauto);
uint* S_BuffFCost[2] = {S_BuffFCost0 + WARPSIZE,S_BuffFCost1 + WARPSIZE};
ushort* S_BuffICost = S_BuffICost0 + WARPSIZE + p.tid;
if(!threadIdx.x)
{
*pit_Stream = g_RecStrParam[blockIdx.x].x;
*pit_Id = g_RecStrParam[blockIdx.x].y;
}
__syncthreads();
p.line.lenght = g_RecStrParam[blockIdx.x].z;
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
SimpleStream<ushort> streamICost( g_ICost + *pit_Stream ,sizeBuffer);
SimpleStream<uint> streamFCost( g_FCost + *pit_Stream ,sizeBuffer);
SimpleStream<short3> streamIndex( g_Index + *pit_Id ,WARPSIZE);
SimpleStream<uint> streamDefCor( g_DefCor + *pit_Id ,WARPSIZE);
if(p.tid == 0)
streamDefCor.SetValue(0,0); // because the first line is not computed
// Caution: check this for the backward pass
streamICost.read<eAVANT>(S_BuffICost);
streamIndex.read<eAVANT>(S_BuffIndex + p.tid);
p.prev_Dz = make_short2(S_BuffIndex[0].x,S_BuffIndex[0].y);
p.prevDefCor = S_BuffIndex[0].z;
p.ID_Bf_Icost = count(p.prev_Dz);
for (ushort i = 0; i < p.ID_Bf_Icost - p.tid; i+=WARPSIZE)
{
S_BuffFCost[p.Id_Buf][i + p.tid] = S_BuffICost[i];
streamFCost.SetValue(i,S_BuffICost[i]);
}
connectCellsLine<eAVANT,hasMask>(streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex,S_BuffICost,S_BuffFCost,p);
streamIndex.ReverseIncre<eARRIERE>();
streamFCost.incre<eAVANT>();
streamFCost.reverse<eARRIERE>();
S_BuffFCost[0] += sizeBuffer;
S_BuffFCost[1] += sizeBuffer;
S_BuffICost += sizeBuffer - WARPSIZE;
streamICost.readFrom<eARRIERE>(S_BuffFCost[p.Id_Buf] + p.tid, sizeBuffer - p.ID_Bf_Icost);
streamICost.ReverseIncre<eARRIERE>();
p.reverse(S_BuffIndex,sizeBuffer);
if(p.ID_Bf_Icost > sizeBuffer)
{
p.ID_Bf_Icost -= sizeBuffer;
streamICost.read<eARRIERE>(S_BuffICost);
streamFCost.incre<eARRIERE>();
}
uint* locFCost = S_BuffFCost[p.Id_Buf] - p.stid<eARRIERE>();
for (ushort i = 0; i < sizeBuffer; i+=WARPSIZE)
locFCost[-i] = S_BuffICost[-i];
connectCellsLine<eARRIERE,hasMask>( streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex + WARPSIZE - 1,S_BuffICost,S_BuffFCost,p);
}
extern "C" void Gpu_OptimisationOneDirection(Data2Optimiz<CuDeviceData3D> &d2O)
{
ushort deltaMax = d2O.penteMax();
float zReg = (float)d2O.zReg();
float zRegQuad = d2O.zRegQuad();
ushort costDefMask = d2O.CostDefMasked();
ushort costTransDefMask = d2O.CostTransMaskNoMask();
bool hasMaskauto = d2O.hasMaskAuto();
dim3 Threads(WARPSIZE,1,1);
dim3 Blocks(d2O.NBlines(),1,1);
ushort sizeBuff = min(d2O.DzMax(),4096); //NAPPEMAX;
ushort cacheLin = sizeBuff + 2 * WARPSIZE;
// Compute the dynamic shared-memory allocation
uint sizeSharedMemory =
cacheLin * sizeof(ushort) + // S_BuffICost0
cacheLin * sizeof(uint) + // S_BuffFCost0
cacheLin * sizeof(uint) + // S_BuffFCost1
WARPSIZE * sizeof(short3) + // S_BuffIndex
// WARPSIZE * sizeof(uint) + // S_BuffDefCor
sizeof(uint) + // pit_Id
sizeof(uint); // pit_Stream
if(hasMaskauto)
Kernel_OptimisationOneDirection< uint,true ><<<Blocks,Threads,sizeSharedMemory>>>
(
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
else
Kernel_OptimisationOneDirection< uint,false ><<<Blocks,Threads,sizeSharedMemory>>>
(
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
printf("Error CUDA Gpu_OptimisationOneDirection\n");
printf("%s",cudaGetErrorString(err));
DUMP(d2O.NBlines());
DUMP(sizeSharedMemory);
DUMP(d2O.DzMax());
}
getLastCudaError("TestkernelOptiOneDirection failed");
}
#endif //_OPTIMISATION_KERNEL_Z_H_
|
8fbf558e945586336b9a90ddaa8c2d544d09ebee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "HasherTest.cuh"
#include "StringTest.cuh"
#include "LinkedListTest.cuh"
#include "SetTest.cuh"
#include "MapTest.cuh"
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
__global__ void TestKernel()
{
printf("INT_MIN %d\n", INT_MIN);
printf("INT_MAX %d\n", INT_MAX);
HasherTest::TestOne();
StringTest::EmptyTest();
StringTest::TestOne();
StringTest::ItoATest();
StringTest::AtoFTest();
StringTest::AtoITest();
LinkedListTest::LinkedListTest lltest;
lltest.InsertionTest();
SetTest::SetTest setTest;
setTest.TestIntBasic();
setTest.TestInt();
setTest.TestInt2();
setTest.TestString();
setTest.TestSetOfSetOfString();
MapTest::MapTest mapTest;
mapTest.TestInteger();
mapTest.TestString();
mapTest.TestMapOfMap();
}
struct CompareDouble
{
inline bool operator() (double& a, double& b) {
return a < b;
}
};
int main()
{
//printf("%d\n", sizeof(CompareDouble));
hipLaunchKernelGGL(( TestKernel), dim3(1), dim3(1), 0, 0, );
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
return 0;
}
| 8fbf558e945586336b9a90ddaa8c2d544d09ebee.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "HasherTest.cuh"
#include "StringTest.cuh"
#include "LinkedListTest.cuh"
#include "SetTest.cuh"
#include "MapTest.cuh"
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
__global__ void TestKernel()
{
printf("INT_MIN %d\n", INT_MIN);
printf("INT_MAX %d\n", INT_MAX);
HasherTest::TestOne();
StringTest::EmptyTest();
StringTest::TestOne();
StringTest::ItoATest();
StringTest::AtoFTest();
StringTest::AtoITest();
LinkedListTest::LinkedListTest lltest;
lltest.InsertionTest();
SetTest::SetTest setTest;
setTest.TestIntBasic();
setTest.TestInt();
setTest.TestInt2();
setTest.TestString();
setTest.TestSetOfSetOfString();
MapTest::MapTest mapTest;
mapTest.TestInteger();
mapTest.TestString();
mapTest.TestMapOfMap();
}
struct CompareDouble
{
inline bool operator() (double& a, double& b) {
return a < b;
}
};
int main()
{
//printf("%d\n", sizeof(CompareDouble));
TestKernel<<<1, 1>>> ();
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
return 0;
}
|
8324d24c3b8f3edc32b95705fbf713c6f020cf87.hip | // !!! This is a file automatically generated by hipify!!!
// a simple code to understand the grid and block layout
// and thread numbering scheme
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
"gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z);
}
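// [Added note, not in the original example] The indices printed above are usually
// combined into a single global thread id; for a 1-D grid of 1-D blocks the
// formula is blockIdx.x * blockDim.x + threadIdx.x (illustrative helper only,
// not called by main):
__device__ int globalThreadId1D(void)
{
return blockIdx.x * blockDim.x + threadIdx.x;
}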
int main(int argc, char **argv) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block (3);
dim3 grid ((nElem+block.x-1)/block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n",grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n",block.x, block.y, block.z);
// check grid and block dimension from device side
hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block), 0, 0, );
// reset device before you leave
hipDeviceReset();
return(0);
}
| 8324d24c3b8f3edc32b95705fbf713c6f020cf87.cu | // a simple code to understand the grid and block layout
// and thread numbering scheme
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
"gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z);
}
int main(int argc, char **argv) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block (3);
dim3 grid ((nElem+block.x-1)/block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n",grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n",block.x, block.y, block.z);
// check grid and block dimension from device side
checkIndex <<<grid, block>>> ();
// reset device before you leave
cudaDeviceReset();
return(0);
}
|
44ad6e838c705efabecefe3957725be87b1cdc97.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
using namespace std;
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int absolute_pos_x = blockIdx.x * blockDim.x + threadIdx.x;
const int absolute_pos_y = blockIdx.y * blockDim.y + threadIdx.y;
const int index = absolute_pos_y * numCols + absolute_pos_x;
if (absolute_pos_x >= numCols || absolute_pos_y >= numRows)
return;
float new_color = 0.0f;
for (int filter_y = 0; filter_y < filterWidth; filter_y++) {
for (int filter_x = 0; filter_x < filterWidth; filter_x++) {
int local_x = absolute_pos_x + filter_x - filterWidth / 2;
int local_y = absolute_pos_y + filter_y - filterWidth / 2;
local_x = min(max(local_x, 0), numCols - 1); // This is how we clamp the values when we're working on an edge pixel
local_y = min(max(local_y, 0), numRows - 1);
float filter_factor = filter[filter_y * filterWidth + filter_x]; // gotta get our filter values
new_color += filter_factor * static_cast<float>(inputChannel[local_y * numCols + local_x]);
}
}
outputChannel[index] = new_color;
}
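// [Added note, not in the original assignment code] Worked example of the clamp
// above: for a 512-column image (numCols == 512), a neighbor column of -2 clamps
// to 0 and a neighbor column of 513 clamps to 511, so pixels near the border
// effectively reuse (replicate) the edge row/column of the image.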
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int absolute_pos_x = blockIdx.x * blockDim.x + threadIdx.x;
const int absolute_pos_y = blockIdx.y * blockDim.y + threadIdx.y;
const int m = absolute_pos_y * numCols + absolute_pos_x;
if (absolute_pos_x >= numCols || absolute_pos_y >= numRows)
return;
redChannel[m] = inputImageRGBA[m].x;
greenChannel[m] = inputImageRGBA[m].y;
blueChannel[m] = inputImageRGBA[m].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void gpu_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 threadsPerBlock(32, 32, 1); // aka blockSize
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 numberOfBlocks(numCols / threadsPerBlock.x + 1, numRows / threadsPerBlock.y + 1, 1); // aka gridSize
const dim3 gridSize = numberOfBlocks;
const dim3 blockSize = threadsPerBlock;
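//[Added note, not in the original code] The grid above is sized with
//numCols / blockDim + 1, which covers the image but allocates one extra block
//when a dimension is already an exact multiple of the block size. The usual
//ceil-division idiom would be, for example:
// const dim3 gridSizeAlt((numCols + threadsPerBlock.x - 1) / threadsPerBlock.x,
// (numRows + threadsPerBlock.y - 1) / threadsPerBlock.y, 1);
//Either way the kernels are safe because each thread bounds-checks its position.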
//TODO: Launch a kernel for separating the RGBA image into different color channels
// void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel)
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize) , 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize) , 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize) , 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize) , 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize) , 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 44ad6e838c705efabecefe3957725be87b1cdc97.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
using namespace std;
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int absolute_pos_x = blockIdx.x * blockDim.x + threadIdx.x;
const int absolute_pos_y = blockIdx.y * blockDim.y + threadIdx.y;
const int index = absolute_pos_y * numCols + absolute_pos_x;
if (absolute_pos_x >= numCols || absolute_pos_y >= numRows)
return;
float new_color = 0.0f;
for (int filter_y = 0; filter_y < filterWidth; filter_y++) {
for (int filter_x = 0; filter_x < filterWidth; filter_x++) {
int local_x = absolute_pos_x + filter_x - filterWidth / 2;
int local_y = absolute_pos_y + filter_y - filterWidth / 2;
local_x = min(max(local_x, 0), numCols - 1); // This is how we clamp the values when we're working on an edge pixel
local_y = min(max(local_y, 0), numRows - 1);
float filter_factor = filter[filter_y * filterWidth + filter_x]; // gotta get our filter values
new_color += filter_factor * static_cast<float>(inputChannel[local_y * numCols + local_x]);
}
}
outputChannel[index] = new_color;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int absolute_pos_x = blockIdx.x * blockDim.x + threadIdx.x;
const int absolute_pos_y = blockIdx.y * blockDim.y + threadIdx.y;
const int m = absolute_pos_y * numCols + absolute_pos_x;
if (absolute_pos_x >= numCols || absolute_pos_y >= numRows)
return;
redChannel[m] = inputImageRGBA[m].x;
greenChannel[m] = inputImageRGBA[m].y;
blueChannel[m] = inputImageRGBA[m].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void gpu_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 threadsPerBlock(32, 32, 1); // aka blockSize
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 numberOfBlocks(numCols / threadsPerBlock.x + 1, numRows / threadsPerBlock.y + 1, 1); // aka gridSize
const dim3 gridSize = numberOfBlocks;
const dim3 blockSize = threadsPerBlock;
//TODO: Launch a kernel for separating the RGBA image into different color channels
// void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel)
separateChannels<<<gridSize, blockSize >>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
gaussian_blur<<<gridSize, blockSize >>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize >>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize >>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize >>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
69ad0c43dbeae8abff4d6c0fedd515142186c202.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021-2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/nullary/eye.h"
#include "cunumeric/nullary/eye_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
template <typename VAL>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
eye_kernel(const AccessorWO<VAL, 2> out, const Point<2> start, const size_t max)
{
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
out[start[0] + offset][start[1] + offset] = 1;
}
template <typename VAL>
struct EyeImplBody<VariantKind::GPU, VAL> {
void operator()(const AccessorWO<VAL, 2>& out,
const Point<2>& start,
const coord_t distance) const
{
const size_t blocks = (distance + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
auto stream = get_cached_stream();
hipLaunchKernelGGL(( eye_kernel<VAL>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, out, start, distance);
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void EyeTask::gpu_variant(TaskContext& context)
{
eye_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
| 69ad0c43dbeae8abff4d6c0fedd515142186c202.cu | /* Copyright 2021-2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/nullary/eye.h"
#include "cunumeric/nullary/eye_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
template <typename VAL>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
eye_kernel(const AccessorWO<VAL, 2> out, const Point<2> start, const size_t max)
{
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
out[start[0] + offset][start[1] + offset] = 1;
}
template <typename VAL>
struct EyeImplBody<VariantKind::GPU, VAL> {
void operator()(const AccessorWO<VAL, 2>& out,
const Point<2>& start,
const coord_t distance) const
{
const size_t blocks = (distance + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
auto stream = get_cached_stream();
eye_kernel<VAL><<<blocks, THREADS_PER_BLOCK, 0, stream>>>(out, start, distance);
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void EyeTask::gpu_variant(TaskContext& context)
{
eye_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
|
fb4eb7395ce6a08cf8018767ba6c44feb84850a6.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
//@@ The goal of this code is to get familiar with the process of transferring
//@@ data. Don't worry if you don't understand the details of the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
hipGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start the timer
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog provides a logging API (similar to Log4J).
//@@ The wbLog logging function takes a logging
//@@ level, which can be one of
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG or TRACE, and
//@@ a message to print.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
| fb4eb7395ce6a08cf8018767ba6c44feb84850a6.cu | #include <wb.h>
//@@ The goal of this code is to get familiar with the process of transferring
//@@ data. Don't worry if you don't understand the details of the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
cudaGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start the timer
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog provides a logging API (similar to Log4J).
//@@ The wbLog logging function takes a logging
//@@ level, which can be one of
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG or TRACE, and
//@@ a message to print.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
|
c4cd86c187600494fc3f27e6941e4dd3b65e43fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_bcnn_vmul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
_bcnn_vmul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
_bcnn_vmul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
_bcnn_vmul_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,y);
}
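// [Added note, not in the original benchmark] Kernel launches are asynchronous with
// respect to the host, so without a device synchronization at this point the elapsed
// time below may mostly reflect launch/enqueue overhead rather than kernel execution
// time. A typical fix would be to add, for example:
// hipDeviceSynchronize();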
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c4cd86c187600494fc3f27e6941e4dd3b65e43fb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_bcnn_vmul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_bcnn_vmul_kernel<<<gridBlock,threadBlock>>>(n,a,b,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_bcnn_vmul_kernel<<<gridBlock,threadBlock>>>(n,a,b,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_bcnn_vmul_kernel<<<gridBlock,threadBlock>>>(n,a,b,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
717bae9177ddc2acd82c88ff73268d60efc27646.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (i>=2 & k>=2 & i<=N-3 & k<=N-3) {
for (int j=2; j<=N-3; j++) {
double _t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
double _t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
double _t_281_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
double _v_38_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i+2][j][k+2];
_v_38_ -= c2 * u1[i+2][j][k-2];
double _v_9_ = c2 * u1[i+2][j][k-2];
double _t_84_ = _v_38_;
double _v_39_ = c1 * u1[i+2][j][k+1];
_v_39_ -= c1 * u1[i+2][j][k-1];
_t_84_ += _v_39_;
double _v_40_ = strx[i] * _t_83_ * _t_84_;
double _v_19_ = c2 * u1[i+2][j][k+1];
double _v_28_ = c2 * u1[i+2][j][k-1];
double _v_56_ = c2 * _v_40_;
double _v_41_ = c2 * u2[i+2][j][k+2];
double _v_3_ = c2 * u2[i+2][j][k+2];
_v_41_ -= c2 * u2[i+2][j][k-2];
double _v_12_ = c2 * u2[i+2][j][k-2];
double _t_91_ = _v_41_;
double _v_42_ = c1 * u2[i+2][j][k+1];
_v_42_ -= c1 * u2[i+2][j][k-1];
_t_91_ += _v_42_;
double _t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_43_ = stry[j] * _t_90_ * _t_91_;
double _v_22_ = c2 * u2[i+2][j][k+1];
double _v_31_ = c2 * u2[i+2][j][k-1];
_v_56_ += c2 * _v_43_;
double _v_44_ = c2 * u3[i+2][j][k+2];
double _v_6_ = c2 * u3[i+2][j][k+2];
_v_44_ -= c2 * u3[i+2][j][k-2];
double _v_15_ = c2 * u3[i+2][j][k-2];
double _t_96_ = _v_44_;
double _v_45_ = c1 * u3[i+2][j][k+1];
_v_45_ -= c1 * u3[i+2][j][k-1];
_t_96_ += _v_45_;
double _t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_46_ = _t_95_ * _t_96_;
double _v_25_ = c2 * u3[i+2][j][k+1];
double _v_34_ = c2 * u3[i+2][j][k-1];
_v_56_ += c2 * _v_46_;
double _t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
double _t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
double _t_286_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
double _v_47_ = c2 * u1[i-2][j][k+2];
_v_0_ -= c2 * u1[i-2][j][k+2];
_v_47_ -= c2 * u1[i-2][j][k-2];
_v_9_ -= c2 * u1[i-2][j][k-2];
double _t_102_ = _v_47_;
double _v_48_ = c1 * u1[i-2][j][k+1];
_v_48_ -= c1 * u1[i-2][j][k-1];
_t_102_ += _v_48_;
double _v_49_ = strx[i] * _t_101_ * _t_102_;
_v_19_ -= c2 * u1[i-2][j][k+1];
_v_28_ -= c2 * u1[i-2][j][k-1];
_v_56_ += c2 * _v_49_;
double _v_50_ = c2 * u2[i-2][j][k+2];
_v_3_ -= c2 * u2[i-2][j][k+2];
_v_50_ -= c2 * u2[i-2][j][k-2];
_v_12_ -= c2 * u2[i-2][j][k-2];
double _t_109_ = _v_50_;
double _v_51_ = c1 * u2[i-2][j][k+1];
_v_51_ -= c1 * u2[i-2][j][k-1];
_t_109_ += _v_51_;
double _t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_52_ = stry[j] * _t_108_ * _t_109_;
_v_22_ -= c2 * u2[i-2][j][k+1];
_v_31_ -= c2 * u2[i-2][j][k-1];
_v_56_ += c2 * _v_52_;
double _v_53_ = c2 * u3[i-2][j][k+2];
_v_6_ -= c2 * u3[i-2][j][k+2];
_v_53_ -= c2 * u3[i-2][j][k-2];
_v_15_ -= c2 * u3[i-2][j][k-2];
double _t_114_ = _v_53_;
double _v_54_ = c1 * u3[i-2][j][k+1];
_v_54_ -= c1 * u3[i-2][j][k-1];
_t_114_ += _v_54_;
double _t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_55_ = _t_113_ * _t_114_;
_v_25_ -= c2 * u3[i-2][j][k+1];
_v_34_ -= c2 * u3[i-2][j][k-1];
_v_56_ += c2 * _v_55_;
double _t_79_ = stry[j] * _v_56_;
double _t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
double _t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
double _t_292_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
double _v_57_ = c2 * u1[i+1][j][k+2];
_v_57_ -= c2 * u1[i+1][j][k-2];
double _t_121_ = _v_57_;
double _v_58_ = c1 * u1[i+1][j][k+1];
double _v_20_ = c1 * u1[i+1][j][k+1];
_v_58_ -= c1 * u1[i+1][j][k-1];
double _v_29_ = c1 * u1[i+1][j][k-1];
_t_121_ += _v_58_;
double _v_59_ = strx[i] * _t_120_ * _t_121_;
double _v_1_ = c1 * u1[i+1][j][k+2];
double _v_10_ = c1 * u1[i+1][j][k-2];
double _v_75_ = c1 * _v_59_;
double _v_60_ = c2 * u2[i+1][j][k+2];
_v_60_ -= c2 * u2[i+1][j][k-2];
double _t_128_ = _v_60_;
double _v_61_ = c1 * u2[i+1][j][k+1];
double _v_23_ = c1 * u2[i+1][j][k+1];
_v_61_ -= c1 * u2[i+1][j][k-1];
double _v_32_ = c1 * u2[i+1][j][k-1];
_t_128_ += _v_61_;
double _t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_62_ = stry[j] * _t_127_ * _t_128_;
double _v_4_ = c1 * u2[i+1][j][k+2];
double _v_13_ = c1 * u2[i+1][j][k-2];
_v_75_ += c1 * _v_62_;
double _v_63_ = c2 * u3[i+1][j][k+2];
_v_63_ -= c2 * u3[i+1][j][k-2];
double _t_133_ = _v_63_;
double _v_64_ = c1 * u3[i+1][j][k+1];
double _v_26_ = c1 * u3[i+1][j][k+1];
_v_64_ -= c1 * u3[i+1][j][k-1];
double _v_35_ = c1 * u3[i+1][j][k-1];
_t_133_ += _v_64_;
double _t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_65_ = _t_132_ * _t_133_;
double _v_7_ = c1 * u3[i+1][j][k+2];
double _v_16_ = c1 * u3[i+1][j][k-2];
_v_75_ += c1 * _v_65_;
double _t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
double _t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
double _t_297_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
double _v_66_ = c2 * u1[i-1][j][k+2];
_v_66_ -= c2 * u1[i-1][j][k-2];
double _t_139_ = _v_66_;
double _v_67_ = c1 * u1[i-1][j][k+1];
_v_20_ -= c1 * u1[i-1][j][k+1];
_v_67_ -= c1 * u1[i-1][j][k-1];
_v_29_ -= c1 * u1[i-1][j][k-1];
_t_139_ += _v_67_;
double _v_68_ = strx[i] * _t_138_ * _t_139_;
_v_1_ -= c1 * u1[i-1][j][k+2];
_v_10_ -= c1 * u1[i-1][j][k-2];
_v_75_ += c1 * _v_68_;
double _v_69_ = c2 * u2[i-1][j][k+2];
_v_69_ -= c2 * u2[i-1][j][k-2];
double _t_146_ = _v_69_;
double _v_70_ = c1 * u2[i-1][j][k+1];
_v_23_ -= c1 * u2[i-1][j][k+1];
_v_70_ -= c1 * u2[i-1][j][k-1];
_v_32_ -= c1 * u2[i-1][j][k-1];
_t_146_ += _v_70_;
double _t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_71_ = stry[j] * _t_145_ * _t_146_;
_v_4_ -= c1 * u2[i-1][j][k+2];
_v_13_ -= c1 * u2[i-1][j][k-2];
_v_75_ += c1 * _v_71_;
double _v_72_ = c2 * u3[i-1][j][k+2];
_v_72_ -= c2 * u3[i-1][j][k-2];
double _t_151_ = _v_72_;
double _v_73_ = c1 * u3[i-1][j][k+1];
_v_26_ -= c1 * u3[i-1][j][k+1];
_v_73_ -= c1 * u3[i-1][j][k-1];
_v_35_ -= c1 * u3[i-1][j][k-1];
_t_151_ += _v_73_;
double _t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_74_ = _t_150_ * _t_151_;
_v_7_ -= c1 * u3[i-1][j][k+2];
_v_16_ -= c1 * u3[i-1][j][k-2];
_v_75_ += c1 * _v_74_;
_t_79_ += stry[j] * _v_75_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_79_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_8_ = stry[j] * _t_16_ * _t_17_;
double _v_18_ = c2 * _v_8_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_7_ = 2.0 * mu[i][j][k+2];
double _t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_7_ += la[i][j][k+2];
double _t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
double _t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = stry[j] * _t_3_ * strx[i];
_v_18_ += c2 * _v_2_;
double _t_11_ = _v_3_;
_t_11_ += _v_4_;
double _v_5_ = _t_10_ * _t_11_;
_v_18_ += c2 * _v_5_;
double _t_24_ = _v_9_;
_t_24_ += _v_10_;
double _t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
double _t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
double _t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_22_ = _t_23_ * _t_24_;
double _v_11_ = stry[j] * _t_22_ * strx[i];
_v_18_ += c2 * _v_11_;
double _t_30_ = _v_12_;
_t_30_ += _v_13_;
double _t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_14_ = _t_29_ * _t_30_;
_v_18_ += c2 * _v_14_;
double _t_36_ = _v_15_;
_t_36_ += _v_16_;
double _v_17_ = stry[j] * _t_35_ * _t_36_;
_v_18_ += c2 * _v_17_;
double _t_0_ = _v_18_;
double _t_56_ = _v_25_;
_t_56_ += _v_26_;
double _t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_27_ = stry[j] * _t_55_ * _t_56_;
double _v_37_ = c1 * _v_27_;
double _t_44_ = _v_19_;
_t_44_ += _v_20_;
double _t_46_ = 2.0 * mu[i][j][k+1];
double _t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_46_ += la[i][j][k+1];
double _t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
double _t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_42_ = _t_43_ * _t_44_;
double _v_21_ = stry[j] * _t_42_ * strx[i+2];
_v_37_ += c1 * _v_21_;
double _t_50_ = _v_22_;
_t_50_ += _v_23_;
double _v_24_ = _t_49_ * _t_50_;
_v_37_ += c1 * _v_24_;
double _t_63_ = _v_28_;
_t_63_ += _v_29_;
double _t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
double _t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_61_ = _t_62_ * _t_63_;
double _v_30_ = stry[j] * _t_61_ * strx[i-2];
_v_37_ += c1 * _v_30_;
double _t_69_ = _v_31_;
_t_69_ += _v_32_;
double _t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_33_ = _t_68_ * _t_69_;
_v_37_ += c1 * _v_33_;
double _t_75_ = _v_34_;
_t_75_ += _v_35_;
double _v_36_ = stry[j] * _t_74_ * _t_75_;
_v_37_ += c1 * _v_36_;
_t_0_ += _v_37_;
r1ic0jc0kc0 += _t_0_;
double _t_159_ = _t_10_;
double _v_76_ = c2 * u1[i][j+2][k+2];
double _v_102_ = c2 * u1[i][j+2][k+2];
_v_76_ -= c2 * u1[i][j-2][k+2];
double _v_108_ = c2 * u1[i][j-2][k+2];
double _t_160_ = _v_76_;
double _v_77_ = c1 * u1[i][j+1][k+2];
_v_77_ -= c1 * u1[i][j-1][k+2];
_t_160_ += _v_77_;
double _t_158_ = _t_159_ * _t_160_;
double _v_115_ = c2 * u1[i][j+1][k+2];
double _v_121_ = c2 * u1[i][j-1][k+2];
double _v_78_ = strx[i] * _t_158_ * stry[j+2];
double _v_88_ = c2 * _v_78_;
double _v_79_ = c2 * u2[i][j+2][k+2];
double _v_105_ = c2 * u2[i][j+2][k+2];
_v_79_ -= c2 * u2[i][j-2][k+2];
double _v_111_ = c2 * u2[i][j-2][k+2];
double _t_165_ = _v_79_;
double _v_80_ = c1 * u2[i][j+1][k+2];
_v_80_ -= c1 * u2[i][j-1][k+2];
_t_165_ += _v_80_;
double _v_81_ = _t_164_ * _t_165_;
double _v_118_ = c2 * u2[i][j+1][k+2];
double _v_124_ = c2 * u2[i][j-1][k+2];
_v_88_ += c2 * _v_81_;
double _t_171_ = _t_29_;
double _v_82_ = c2 * u1[i][j+2][k-2];
_v_102_ -= c2 * u1[i][j+2][k-2];
_v_82_ -= c2 * u1[i][j-2][k-2];
_v_108_ -= c2 * u1[i][j-2][k-2];
double _t_172_ = _v_82_;
double _v_83_ = c1 * u1[i][j+1][k-2];
_v_83_ -= c1 * u1[i][j-1][k-2];
_t_172_ += _v_83_;
double _t_170_ = _t_171_ * _t_172_;
_v_115_ -= c2 * u1[i][j+1][k-2];
_v_121_ -= c2 * u1[i][j-1][k-2];
double _v_84_ = strx[i] * _t_170_ * stry[j];
_v_88_ += c2 * _v_84_;
double _v_85_ = c2 * u2[i][j+2][k-2];
_v_105_ -= c2 * u2[i][j+2][k-2];
_v_85_ -= c2 * u2[i][j-2][k-2];
_v_111_ -= c2 * u2[i][j-2][k-2];
double _t_177_ = _v_85_;
double _v_86_ = c1 * u2[i][j+1][k-2];
_v_86_ -= c1 * u2[i][j-1][k-2];
_t_177_ += _v_86_;
double _v_87_ = _t_176_ * _t_177_;
_v_118_ -= c2 * u2[i][j+1][k-2];
_v_124_ -= c2 * u2[i][j-1][k-2];
_v_88_ += c2 * _v_87_;
double _t_155_ = _v_88_;
double _t_184_ = _t_49_;
double _v_89_ = c2 * u1[i][j+2][k+1];
_v_89_ -= c2 * u1[i][j-2][k+1];
double _t_185_ = _v_89_;
double _v_90_ = c1 * u1[i][j+1][k+1];
double _v_116_ = c1 * u1[i][j+1][k+1];
_v_90_ -= c1 * u1[i][j-1][k+1];
double _v_122_ = c1 * u1[i][j-1][k+1];
_t_185_ += _v_90_;
double _t_183_ = _t_184_ * _t_185_;
double _v_103_ = c1 * u1[i][j+2][k+1];
double _v_109_ = c1 * u1[i][j-2][k+1];
double _v_91_ = strx[i] * _t_183_ * stry[j-2];
double _v_101_ = c1 * _v_91_;
double _v_92_ = c2 * u2[i][j+2][k+1];
_v_92_ -= c2 * u2[i][j-2][k+1];
double _t_190_ = _v_92_;
double _v_93_ = c1 * u2[i][j+1][k+1];
double _v_119_ = c1 * u2[i][j+1][k+1];
_v_93_ -= c1 * u2[i][j-1][k+1];
double _v_125_ = c1 * u2[i][j-1][k+1];
_t_190_ += _v_93_;
double _v_94_ = _t_189_ * _t_190_;
double _v_106_ = c1 * u2[i][j+2][k+1];
double _v_112_ = c1 * u2[i][j-2][k+1];
_v_101_ += c1 * _v_94_;
double _t_196_ = _t_68_;
double _v_95_ = c2 * u1[i][j+2][k-1];
_v_95_ -= c2 * u1[i][j-2][k-1];
double _t_197_ = _v_95_;
double _v_96_ = c1 * u1[i][j+1][k-1];
_v_116_ -= c1 * u1[i][j+1][k-1];
_v_96_ -= c1 * u1[i][j-1][k-1];
_v_122_ -= c1 * u1[i][j-1][k-1];
_t_197_ += _v_96_;
double _t_195_ = _t_196_ * _t_197_;
_v_103_ -= c1 * u1[i][j+2][k-1];
_v_109_ -= c1 * u1[i][j-2][k-1];
double _v_97_ = strx[i] * _t_195_ * stry[j];
_v_101_ += c1 * _v_97_;
double _v_98_ = c2 * u2[i][j+2][k-1];
_v_98_ -= c2 * u2[i][j-2][k-1];
double _t_202_ = _v_98_;
double _v_99_ = c1 * u2[i][j+1][k-1];
_v_119_ -= c1 * u2[i][j+1][k-1];
_v_99_ -= c1 * u2[i][j-1][k-1];
_v_125_ -= c1 * u2[i][j-1][k-1];
_t_202_ += _v_99_;
double _v_100_ = _t_201_ * _t_202_;
_v_106_ -= c1 * u2[i][j+2][k-1];
_v_112_ -= c1 * u2[i][j-2][k-1];
_v_101_ += c1 * _v_100_;
_t_155_ += _v_101_;
r1ic0jc0kc0 += _t_155_;
double _t_211_ = _v_102_;
_t_211_ += _v_103_;
double _t_210_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
double _t_209_ = _t_210_ * _t_211_;
double _v_104_ = strx[i] * _t_209_ * stry[j+1];
double _v_114_ = c2 * _v_104_;
double _t_216_ = _v_105_;
_t_216_ += _v_106_;
double _t_215_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
double _v_107_ = _t_215_ * _t_216_;
_v_114_ += c2 * _v_107_;
double _t_223_ = _v_108_;
_t_223_ += _v_109_;
double _t_222_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
double _t_221_ = _t_222_ * _t_223_;
double _v_110_ = strx[i] * _t_221_ * stry[j];
_v_114_ += c2 * _v_110_;
double _t_228_ = _v_111_;
_t_228_ += _v_112_;
double _t_227_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
double _v_113_ = _t_227_ * _t_228_;
_v_114_ += c2 * _v_113_;
double _t_206_ = _v_114_;
double _t_236_ = _v_115_;
_t_236_ += _v_116_;
double _t_235_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
double _t_234_ = _t_235_ * _t_236_;
double _v_117_ = strx[i] * _t_234_ * stry[j-1];
double _v_127_ = c1 * _v_117_;
double _t_241_ = _v_118_;
_t_241_ += _v_119_;
double _t_240_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
double _v_120_ = _t_240_ * _t_241_;
_v_127_ += c1 * _v_120_;
double _t_248_ = _v_121_;
_t_248_ += _v_122_;
double _t_247_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
double _t_246_ = _t_247_ * _t_248_;
double _v_123_ = strx[i] * _t_246_ * stry[j];
_v_127_ += c1 * _v_123_;
double _t_253_ = _v_124_;
_t_253_ += _v_125_;
double _t_252_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
double _v_126_ = _t_252_ * _t_253_;
_v_127_ += c1 * _v_126_;
_t_206_ += _v_127_;
r1ic0jc0kc0 += _t_206_;
double _v_128_ = c2 * u2[i+2][j+2][k];
_v_128_ -= c2 * u2[i-2][j+2][k];
double _v_131_ = c2 * u2[i+2][j-2][k];
_v_131_ -= c2 * u2[i-2][j-2][k];
double _v_142_ = c2 * u2[i+2][j+2][k];
_v_142_ -= c2 * u2[i+2][j-2][k];
double _v_145_ = c2 * u2[i-2][j+2][k];
_v_145_ -= c2 * u2[i-2][j-2][k];
double _v_135_ = c2 * u2[i+2][j+1][k];
_v_135_ -= c2 * u2[i-2][j+1][k];
double _v_138_ = c2 * u2[i+2][j-1][k];
_v_138_ -= c2 * u2[i-2][j-1][k];
double _v_149_ = c2 * u2[i+1][j+2][k];
_v_149_ -= c2 * u2[i+1][j-2][k];
double _v_152_ = c2 * u2[i-1][j+2][k];
_v_152_ -= c2 * u2[i-1][j-2][k];
double _v_129_ = c1 * u2[i+1][j+2][k];
_v_129_ -= c1 * u2[i-1][j+2][k];
double _v_132_ = c1 * u2[i+1][j-2][k];
_v_132_ -= c1 * u2[i-1][j-2][k];
double _v_143_ = c1 * u2[i+2][j+1][k];
_v_143_ -= c1 * u2[i+2][j-1][k];
double _v_146_ = c1 * u2[i-2][j+1][k];
_v_146_ -= c1 * u2[i-2][j-1][k];
double _v_136_ = c1 * u2[i+1][j+1][k];
_v_136_ -= c1 * u2[i-1][j+1][k];
double _v_139_ = c1 * u2[i+1][j-1][k];
_v_139_ -= c1 * u2[i-1][j-1][k];
double _v_150_ = c1 * u2[i+1][j+1][k];
_v_150_ -= c1 * u2[i+1][j-1][k];
double _v_153_ = c1 * u2[i-1][j+1][k];
_v_153_ -= c1 * u2[i-1][j-1][k];
double _t_260_ = _v_128_;
_t_260_ += _v_129_;
double _t_259_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
double _v_130_ = _t_259_ * _t_260_;
double _v_134_ = c2 * _v_130_;
double _t_265_ = _v_131_;
_t_265_ += _v_132_;
double _t_264_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
double _v_133_ = _t_264_ * _t_265_;
_v_134_ += c2 * _v_133_;
double _t_257_ = _v_134_;
double _t_282_ = _v_142_;
_t_282_ += _v_143_;
double _v_144_ = _t_281_ * _t_282_;
double _v_148_ = c2 * _v_144_;
double _t_287_ = _v_145_;
_t_287_ += _v_146_;
double _v_147_ = _t_286_ * _t_287_;
_v_148_ += c2 * _v_147_;
_t_257_ += _v_148_;
double _t_271_ = _v_135_;
_t_271_ += _v_136_;
double _t_270_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
double _v_137_ = _t_270_ * _t_271_;
double _v_141_ = c1 * _v_137_;
double _t_276_ = _v_138_;
_t_276_ += _v_139_;
double _t_275_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
double _v_140_ = _t_275_ * _t_276_;
_v_141_ += c1 * _v_140_;
_t_257_ += _v_141_;
double _t_293_ = _v_149_;
_t_293_ += _v_150_;
double _v_151_ = _t_292_ * _t_293_;
double _v_155_ = c1 * _v_151_;
double _t_298_ = _v_152_;
_t_298_ += _v_153_;
double _v_154_ = _t_297_ * _t_298_;
_v_155_ += c1 * _v_154_;
_t_257_ += _v_155_;
r1ic0jc0kc0 += _t_257_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 1, 8);
dim3 gridconfig (ceil(N, blockconfig.x), 1, ceil(N, blockconfig.z));
hipLaunchKernelGGL(( curvi) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
}
| 717bae9177ddc2acd82c88ff73268d60efc27646.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
	//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (i>=2 & k>=2 & i<=N-3 & k<=N-3) {
for (int j=2; j<=N-3; j++) {
double _t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
double _t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
double _t_281_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
double _v_38_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i+2][j][k+2];
_v_38_ -= c2 * u1[i+2][j][k-2];
double _v_9_ = c2 * u1[i+2][j][k-2];
double _t_84_ = _v_38_;
double _v_39_ = c1 * u1[i+2][j][k+1];
_v_39_ -= c1 * u1[i+2][j][k-1];
_t_84_ += _v_39_;
double _v_40_ = strx[i] * _t_83_ * _t_84_;
double _v_19_ = c2 * u1[i+2][j][k+1];
double _v_28_ = c2 * u1[i+2][j][k-1];
double _v_56_ = c2 * _v_40_;
double _v_41_ = c2 * u2[i+2][j][k+2];
double _v_3_ = c2 * u2[i+2][j][k+2];
_v_41_ -= c2 * u2[i+2][j][k-2];
double _v_12_ = c2 * u2[i+2][j][k-2];
double _t_91_ = _v_41_;
double _v_42_ = c1 * u2[i+2][j][k+1];
_v_42_ -= c1 * u2[i+2][j][k-1];
_t_91_ += _v_42_;
double _t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_43_ = stry[j] * _t_90_ * _t_91_;
double _v_22_ = c2 * u2[i+2][j][k+1];
double _v_31_ = c2 * u2[i+2][j][k-1];
_v_56_ += c2 * _v_43_;
double _v_44_ = c2 * u3[i+2][j][k+2];
double _v_6_ = c2 * u3[i+2][j][k+2];
_v_44_ -= c2 * u3[i+2][j][k-2];
double _v_15_ = c2 * u3[i+2][j][k-2];
double _t_96_ = _v_44_;
double _v_45_ = c1 * u3[i+2][j][k+1];
_v_45_ -= c1 * u3[i+2][j][k-1];
_t_96_ += _v_45_;
double _t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_46_ = _t_95_ * _t_96_;
double _v_25_ = c2 * u3[i+2][j][k+1];
double _v_34_ = c2 * u3[i+2][j][k-1];
_v_56_ += c2 * _v_46_;
double _t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
double _t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
double _t_286_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
double _v_47_ = c2 * u1[i-2][j][k+2];
_v_0_ -= c2 * u1[i-2][j][k+2];
_v_47_ -= c2 * u1[i-2][j][k-2];
_v_9_ -= c2 * u1[i-2][j][k-2];
double _t_102_ = _v_47_;
double _v_48_ = c1 * u1[i-2][j][k+1];
_v_48_ -= c1 * u1[i-2][j][k-1];
_t_102_ += _v_48_;
double _v_49_ = strx[i] * _t_101_ * _t_102_;
_v_19_ -= c2 * u1[i-2][j][k+1];
_v_28_ -= c2 * u1[i-2][j][k-1];
_v_56_ += c2 * _v_49_;
double _v_50_ = c2 * u2[i-2][j][k+2];
_v_3_ -= c2 * u2[i-2][j][k+2];
_v_50_ -= c2 * u2[i-2][j][k-2];
_v_12_ -= c2 * u2[i-2][j][k-2];
double _t_109_ = _v_50_;
double _v_51_ = c1 * u2[i-2][j][k+1];
_v_51_ -= c1 * u2[i-2][j][k-1];
_t_109_ += _v_51_;
double _t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_52_ = stry[j] * _t_108_ * _t_109_;
_v_22_ -= c2 * u2[i-2][j][k+1];
_v_31_ -= c2 * u2[i-2][j][k-1];
_v_56_ += c2 * _v_52_;
double _v_53_ = c2 * u3[i-2][j][k+2];
_v_6_ -= c2 * u3[i-2][j][k+2];
_v_53_ -= c2 * u3[i-2][j][k-2];
_v_15_ -= c2 * u3[i-2][j][k-2];
double _t_114_ = _v_53_;
double _v_54_ = c1 * u3[i-2][j][k+1];
_v_54_ -= c1 * u3[i-2][j][k-1];
_t_114_ += _v_54_;
double _t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_55_ = _t_113_ * _t_114_;
_v_25_ -= c2 * u3[i-2][j][k+1];
_v_34_ -= c2 * u3[i-2][j][k-1];
_v_56_ += c2 * _v_55_;
double _t_79_ = stry[j] * _v_56_;
double _t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
double _t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
double _t_292_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
double _v_57_ = c2 * u1[i+1][j][k+2];
_v_57_ -= c2 * u1[i+1][j][k-2];
double _t_121_ = _v_57_;
double _v_58_ = c1 * u1[i+1][j][k+1];
double _v_20_ = c1 * u1[i+1][j][k+1];
_v_58_ -= c1 * u1[i+1][j][k-1];
double _v_29_ = c1 * u1[i+1][j][k-1];
_t_121_ += _v_58_;
double _v_59_ = strx[i] * _t_120_ * _t_121_;
double _v_1_ = c1 * u1[i+1][j][k+2];
double _v_10_ = c1 * u1[i+1][j][k-2];
double _v_75_ = c1 * _v_59_;
double _v_60_ = c2 * u2[i+1][j][k+2];
_v_60_ -= c2 * u2[i+1][j][k-2];
double _t_128_ = _v_60_;
double _v_61_ = c1 * u2[i+1][j][k+1];
double _v_23_ = c1 * u2[i+1][j][k+1];
_v_61_ -= c1 * u2[i+1][j][k-1];
double _v_32_ = c1 * u2[i+1][j][k-1];
_t_128_ += _v_61_;
double _t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_62_ = stry[j] * _t_127_ * _t_128_;
double _v_4_ = c1 * u2[i+1][j][k+2];
double _v_13_ = c1 * u2[i+1][j][k-2];
_v_75_ += c1 * _v_62_;
double _v_63_ = c2 * u3[i+1][j][k+2];
_v_63_ -= c2 * u3[i+1][j][k-2];
double _t_133_ = _v_63_;
double _v_64_ = c1 * u3[i+1][j][k+1];
double _v_26_ = c1 * u3[i+1][j][k+1];
_v_64_ -= c1 * u3[i+1][j][k-1];
double _v_35_ = c1 * u3[i+1][j][k-1];
_t_133_ += _v_64_;
double _t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_65_ = _t_132_ * _t_133_;
double _v_7_ = c1 * u3[i+1][j][k+2];
double _v_16_ = c1 * u3[i+1][j][k-2];
_v_75_ += c1 * _v_65_;
double _t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
double _t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
double _t_297_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
double _v_66_ = c2 * u1[i-1][j][k+2];
_v_66_ -= c2 * u1[i-1][j][k-2];
double _t_139_ = _v_66_;
double _v_67_ = c1 * u1[i-1][j][k+1];
_v_20_ -= c1 * u1[i-1][j][k+1];
_v_67_ -= c1 * u1[i-1][j][k-1];
_v_29_ -= c1 * u1[i-1][j][k-1];
_t_139_ += _v_67_;
double _v_68_ = strx[i] * _t_138_ * _t_139_;
_v_1_ -= c1 * u1[i-1][j][k+2];
_v_10_ -= c1 * u1[i-1][j][k-2];
_v_75_ += c1 * _v_68_;
double _v_69_ = c2 * u2[i-1][j][k+2];
_v_69_ -= c2 * u2[i-1][j][k-2];
double _t_146_ = _v_69_;
double _v_70_ = c1 * u2[i-1][j][k+1];
_v_23_ -= c1 * u2[i-1][j][k+1];
_v_70_ -= c1 * u2[i-1][j][k-1];
_v_32_ -= c1 * u2[i-1][j][k-1];
_t_146_ += _v_70_;
double _t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_71_ = stry[j] * _t_145_ * _t_146_;
_v_4_ -= c1 * u2[i-1][j][k+2];
_v_13_ -= c1 * u2[i-1][j][k-2];
_v_75_ += c1 * _v_71_;
double _v_72_ = c2 * u3[i-1][j][k+2];
_v_72_ -= c2 * u3[i-1][j][k-2];
double _t_151_ = _v_72_;
double _v_73_ = c1 * u3[i-1][j][k+1];
_v_26_ -= c1 * u3[i-1][j][k+1];
_v_73_ -= c1 * u3[i-1][j][k-1];
_v_35_ -= c1 * u3[i-1][j][k-1];
_t_151_ += _v_73_;
double _t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_74_ = _t_150_ * _t_151_;
_v_7_ -= c1 * u3[i-1][j][k+2];
_v_16_ -= c1 * u3[i-1][j][k-2];
_v_75_ += c1 * _v_74_;
_t_79_ += stry[j] * _v_75_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_79_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_8_ = stry[j] * _t_16_ * _t_17_;
double _v_18_ = c2 * _v_8_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_7_ = 2.0 * mu[i][j][k+2];
double _t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_7_ += la[i][j][k+2];
double _t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
double _t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = stry[j] * _t_3_ * strx[i];
_v_18_ += c2 * _v_2_;
double _t_11_ = _v_3_;
_t_11_ += _v_4_;
double _v_5_ = _t_10_ * _t_11_;
_v_18_ += c2 * _v_5_;
double _t_24_ = _v_9_;
_t_24_ += _v_10_;
double _t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
double _t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
double _t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_22_ = _t_23_ * _t_24_;
double _v_11_ = stry[j] * _t_22_ * strx[i];
_v_18_ += c2 * _v_11_;
double _t_30_ = _v_12_;
_t_30_ += _v_13_;
double _t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_14_ = _t_29_ * _t_30_;
_v_18_ += c2 * _v_14_;
double _t_36_ = _v_15_;
_t_36_ += _v_16_;
double _v_17_ = stry[j] * _t_35_ * _t_36_;
_v_18_ += c2 * _v_17_;
double _t_0_ = _v_18_;
double _t_56_ = _v_25_;
_t_56_ += _v_26_;
double _t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_27_ = stry[j] * _t_55_ * _t_56_;
double _v_37_ = c1 * _v_27_;
double _t_44_ = _v_19_;
_t_44_ += _v_20_;
double _t_46_ = 2.0 * mu[i][j][k+1];
double _t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_46_ += la[i][j][k+1];
double _t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
double _t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_42_ = _t_43_ * _t_44_;
double _v_21_ = stry[j] * _t_42_ * strx[i+2];
_v_37_ += c1 * _v_21_;
double _t_50_ = _v_22_;
_t_50_ += _v_23_;
double _v_24_ = _t_49_ * _t_50_;
_v_37_ += c1 * _v_24_;
double _t_63_ = _v_28_;
_t_63_ += _v_29_;
double _t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
double _t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_61_ = _t_62_ * _t_63_;
double _v_30_ = stry[j] * _t_61_ * strx[i-2];
_v_37_ += c1 * _v_30_;
double _t_69_ = _v_31_;
_t_69_ += _v_32_;
double _t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_33_ = _t_68_ * _t_69_;
_v_37_ += c1 * _v_33_;
double _t_75_ = _v_34_;
_t_75_ += _v_35_;
double _v_36_ = stry[j] * _t_74_ * _t_75_;
_v_37_ += c1 * _v_36_;
_t_0_ += _v_37_;
r1ic0jc0kc0 += _t_0_;
double _t_159_ = _t_10_;
double _v_76_ = c2 * u1[i][j+2][k+2];
double _v_102_ = c2 * u1[i][j+2][k+2];
_v_76_ -= c2 * u1[i][j-2][k+2];
double _v_108_ = c2 * u1[i][j-2][k+2];
double _t_160_ = _v_76_;
double _v_77_ = c1 * u1[i][j+1][k+2];
_v_77_ -= c1 * u1[i][j-1][k+2];
_t_160_ += _v_77_;
double _t_158_ = _t_159_ * _t_160_;
double _v_115_ = c2 * u1[i][j+1][k+2];
double _v_121_ = c2 * u1[i][j-1][k+2];
double _v_78_ = strx[i] * _t_158_ * stry[j+2];
double _v_88_ = c2 * _v_78_;
double _v_79_ = c2 * u2[i][j+2][k+2];
double _v_105_ = c2 * u2[i][j+2][k+2];
_v_79_ -= c2 * u2[i][j-2][k+2];
double _v_111_ = c2 * u2[i][j-2][k+2];
double _t_165_ = _v_79_;
double _v_80_ = c1 * u2[i][j+1][k+2];
_v_80_ -= c1 * u2[i][j-1][k+2];
_t_165_ += _v_80_;
double _v_81_ = _t_164_ * _t_165_;
double _v_118_ = c2 * u2[i][j+1][k+2];
double _v_124_ = c2 * u2[i][j-1][k+2];
_v_88_ += c2 * _v_81_;
double _t_171_ = _t_29_;
double _v_82_ = c2 * u1[i][j+2][k-2];
_v_102_ -= c2 * u1[i][j+2][k-2];
_v_82_ -= c2 * u1[i][j-2][k-2];
_v_108_ -= c2 * u1[i][j-2][k-2];
double _t_172_ = _v_82_;
double _v_83_ = c1 * u1[i][j+1][k-2];
_v_83_ -= c1 * u1[i][j-1][k-2];
_t_172_ += _v_83_;
double _t_170_ = _t_171_ * _t_172_;
_v_115_ -= c2 * u1[i][j+1][k-2];
_v_121_ -= c2 * u1[i][j-1][k-2];
double _v_84_ = strx[i] * _t_170_ * stry[j];
_v_88_ += c2 * _v_84_;
double _v_85_ = c2 * u2[i][j+2][k-2];
_v_105_ -= c2 * u2[i][j+2][k-2];
_v_85_ -= c2 * u2[i][j-2][k-2];
_v_111_ -= c2 * u2[i][j-2][k-2];
double _t_177_ = _v_85_;
double _v_86_ = c1 * u2[i][j+1][k-2];
_v_86_ -= c1 * u2[i][j-1][k-2];
_t_177_ += _v_86_;
double _v_87_ = _t_176_ * _t_177_;
_v_118_ -= c2 * u2[i][j+1][k-2];
_v_124_ -= c2 * u2[i][j-1][k-2];
_v_88_ += c2 * _v_87_;
double _t_155_ = _v_88_;
double _t_184_ = _t_49_;
double _v_89_ = c2 * u1[i][j+2][k+1];
_v_89_ -= c2 * u1[i][j-2][k+1];
double _t_185_ = _v_89_;
double _v_90_ = c1 * u1[i][j+1][k+1];
double _v_116_ = c1 * u1[i][j+1][k+1];
_v_90_ -= c1 * u1[i][j-1][k+1];
double _v_122_ = c1 * u1[i][j-1][k+1];
_t_185_ += _v_90_;
double _t_183_ = _t_184_ * _t_185_;
double _v_103_ = c1 * u1[i][j+2][k+1];
double _v_109_ = c1 * u1[i][j-2][k+1];
double _v_91_ = strx[i] * _t_183_ * stry[j-2];
double _v_101_ = c1 * _v_91_;
double _v_92_ = c2 * u2[i][j+2][k+1];
_v_92_ -= c2 * u2[i][j-2][k+1];
double _t_190_ = _v_92_;
double _v_93_ = c1 * u2[i][j+1][k+1];
double _v_119_ = c1 * u2[i][j+1][k+1];
_v_93_ -= c1 * u2[i][j-1][k+1];
double _v_125_ = c1 * u2[i][j-1][k+1];
_t_190_ += _v_93_;
double _v_94_ = _t_189_ * _t_190_;
double _v_106_ = c1 * u2[i][j+2][k+1];
double _v_112_ = c1 * u2[i][j-2][k+1];
_v_101_ += c1 * _v_94_;
double _t_196_ = _t_68_;
double _v_95_ = c2 * u1[i][j+2][k-1];
_v_95_ -= c2 * u1[i][j-2][k-1];
double _t_197_ = _v_95_;
double _v_96_ = c1 * u1[i][j+1][k-1];
_v_116_ -= c1 * u1[i][j+1][k-1];
_v_96_ -= c1 * u1[i][j-1][k-1];
_v_122_ -= c1 * u1[i][j-1][k-1];
_t_197_ += _v_96_;
double _t_195_ = _t_196_ * _t_197_;
_v_103_ -= c1 * u1[i][j+2][k-1];
_v_109_ -= c1 * u1[i][j-2][k-1];
double _v_97_ = strx[i] * _t_195_ * stry[j];
_v_101_ += c1 * _v_97_;
double _v_98_ = c2 * u2[i][j+2][k-1];
_v_98_ -= c2 * u2[i][j-2][k-1];
double _t_202_ = _v_98_;
double _v_99_ = c1 * u2[i][j+1][k-1];
_v_119_ -= c1 * u2[i][j+1][k-1];
_v_99_ -= c1 * u2[i][j-1][k-1];
_v_125_ -= c1 * u2[i][j-1][k-1];
_t_202_ += _v_99_;
double _v_100_ = _t_201_ * _t_202_;
_v_106_ -= c1 * u2[i][j+2][k-1];
_v_112_ -= c1 * u2[i][j-2][k-1];
_v_101_ += c1 * _v_100_;
_t_155_ += _v_101_;
r1ic0jc0kc0 += _t_155_;
double _t_211_ = _v_102_;
_t_211_ += _v_103_;
double _t_210_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
double _t_209_ = _t_210_ * _t_211_;
double _v_104_ = strx[i] * _t_209_ * stry[j+1];
double _v_114_ = c2 * _v_104_;
double _t_216_ = _v_105_;
_t_216_ += _v_106_;
double _t_215_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
double _v_107_ = _t_215_ * _t_216_;
_v_114_ += c2 * _v_107_;
double _t_223_ = _v_108_;
_t_223_ += _v_109_;
double _t_222_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
double _t_221_ = _t_222_ * _t_223_;
double _v_110_ = strx[i] * _t_221_ * stry[j];
_v_114_ += c2 * _v_110_;
double _t_228_ = _v_111_;
_t_228_ += _v_112_;
double _t_227_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
double _v_113_ = _t_227_ * _t_228_;
_v_114_ += c2 * _v_113_;
double _t_206_ = _v_114_;
double _t_236_ = _v_115_;
_t_236_ += _v_116_;
double _t_235_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
double _t_234_ = _t_235_ * _t_236_;
double _v_117_ = strx[i] * _t_234_ * stry[j-1];
double _v_127_ = c1 * _v_117_;
double _t_241_ = _v_118_;
_t_241_ += _v_119_;
double _t_240_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
double _v_120_ = _t_240_ * _t_241_;
_v_127_ += c1 * _v_120_;
double _t_248_ = _v_121_;
_t_248_ += _v_122_;
double _t_247_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
double _t_246_ = _t_247_ * _t_248_;
double _v_123_ = strx[i] * _t_246_ * stry[j];
_v_127_ += c1 * _v_123_;
double _t_253_ = _v_124_;
_t_253_ += _v_125_;
double _t_252_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
double _v_126_ = _t_252_ * _t_253_;
_v_127_ += c1 * _v_126_;
_t_206_ += _v_127_;
r1ic0jc0kc0 += _t_206_;
double _v_128_ = c2 * u2[i+2][j+2][k];
_v_128_ -= c2 * u2[i-2][j+2][k];
double _v_131_ = c2 * u2[i+2][j-2][k];
_v_131_ -= c2 * u2[i-2][j-2][k];
double _v_142_ = c2 * u2[i+2][j+2][k];
_v_142_ -= c2 * u2[i+2][j-2][k];
double _v_145_ = c2 * u2[i-2][j+2][k];
_v_145_ -= c2 * u2[i-2][j-2][k];
double _v_135_ = c2 * u2[i+2][j+1][k];
_v_135_ -= c2 * u2[i-2][j+1][k];
double _v_138_ = c2 * u2[i+2][j-1][k];
_v_138_ -= c2 * u2[i-2][j-1][k];
double _v_149_ = c2 * u2[i+1][j+2][k];
_v_149_ -= c2 * u2[i+1][j-2][k];
double _v_152_ = c2 * u2[i-1][j+2][k];
_v_152_ -= c2 * u2[i-1][j-2][k];
double _v_129_ = c1 * u2[i+1][j+2][k];
_v_129_ -= c1 * u2[i-1][j+2][k];
double _v_132_ = c1 * u2[i+1][j-2][k];
_v_132_ -= c1 * u2[i-1][j-2][k];
double _v_143_ = c1 * u2[i+2][j+1][k];
_v_143_ -= c1 * u2[i+2][j-1][k];
double _v_146_ = c1 * u2[i-2][j+1][k];
_v_146_ -= c1 * u2[i-2][j-1][k];
double _v_136_ = c1 * u2[i+1][j+1][k];
_v_136_ -= c1 * u2[i-1][j+1][k];
double _v_139_ = c1 * u2[i+1][j-1][k];
_v_139_ -= c1 * u2[i-1][j-1][k];
double _v_150_ = c1 * u2[i+1][j+1][k];
_v_150_ -= c1 * u2[i+1][j-1][k];
double _v_153_ = c1 * u2[i-1][j+1][k];
_v_153_ -= c1 * u2[i-1][j-1][k];
double _t_260_ = _v_128_;
_t_260_ += _v_129_;
double _t_259_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
double _v_130_ = _t_259_ * _t_260_;
double _v_134_ = c2 * _v_130_;
double _t_265_ = _v_131_;
_t_265_ += _v_132_;
double _t_264_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
double _v_133_ = _t_264_ * _t_265_;
_v_134_ += c2 * _v_133_;
double _t_257_ = _v_134_;
double _t_282_ = _v_142_;
_t_282_ += _v_143_;
double _v_144_ = _t_281_ * _t_282_;
double _v_148_ = c2 * _v_144_;
double _t_287_ = _v_145_;
_t_287_ += _v_146_;
double _v_147_ = _t_286_ * _t_287_;
_v_148_ += c2 * _v_147_;
_t_257_ += _v_148_;
double _t_271_ = _v_135_;
_t_271_ += _v_136_;
double _t_270_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
double _v_137_ = _t_270_ * _t_271_;
double _v_141_ = c1 * _v_137_;
double _t_276_ = _v_138_;
_t_276_ += _v_139_;
double _t_275_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
double _v_140_ = _t_275_ * _t_276_;
_v_141_ += c1 * _v_140_;
_t_257_ += _v_141_;
double _t_293_ = _v_149_;
_t_293_ += _v_150_;
double _v_151_ = _t_292_ * _t_293_;
double _v_155_ = c1 * _v_151_;
double _t_298_ = _v_152_;
_t_298_ += _v_153_;
double _v_154_ = _t_297_ * _t_298_;
_v_155_ += c1 * _v_154_;
_t_257_ += _v_155_;
r1ic0jc0kc0 += _t_257_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 1, 8);
dim3 gridconfig (ceil(N, blockconfig.x), 1, ceil(N, blockconfig.z));
curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
}
|
4ed0b1d9dfcbe7e82e86d3200cbefb3c54874ade.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
#define Threads 3
#define Blocks 4
#define N Threads*Blocks
__global__ // GPU function
void add(int *a, int *b, int n)
{
// Get ID of thread being executed
int tid = threadIdx.x + blockIdx.x * blockDim.x;
	// if the thread id is less than the number of elements to process
if (tid < n)
// Add them together
b[tid] += a[tid];
// Notice there is no return statement
}
int main(void)
{
// Calculate memory size
int memSize = N*sizeof(int);
// Initialize host (CPU) memory
int *h_a, *h_b;
h_a = (int*)malloc(memSize);
h_b = (int*)malloc(memSize);
// Initialize device (GPU) memory
int *d_a, *d_b;
hipMalloc((void**)&d_a, memSize);
hipMalloc((void**)&d_b, memSize);
// Add some values to host arrays a and b to sum.
for (int i = 0; i < N; i++) {
h_a[i] = i;
h_b[i] = i*i;
}
// Send host (CPU) memory to device (GPU)
hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, memSize, hipMemcpyHostToDevice);
// Run function add() on device (GPU)
hipLaunchKernelGGL(( add), dim3(Blocks), dim3(Threads), 0, 0, d_a, d_b, N);
// Make sure all threads on GPU finish
hipDeviceSynchronize();
// Send device (GPU) memory back to host(CPU)
hipMemcpy(h_b, d_b, memSize, hipMemcpyDeviceToHost);
// Print output from device (GPU)
for (int i = 0; i < N; i++)
cout << h_b[i] << "\n";
// Free host (CPU) memory
free(h_a);
free(h_b);
// Free device (GPU) memory
hipFree(d_a);
hipFree(d_b);
// Exit with success!
	return 0;
}
| 4ed0b1d9dfcbe7e82e86d3200cbefb3c54874ade.cu | #include <iostream>
using namespace std;
#define Threads 3
#define Blocks 4
#define N Threads*Blocks
__global__ // GPU function
void add(int *a, int *b, int n)
{
// Get ID of thread being executed
int tid = threadIdx.x + blockIdx.x * blockDim.x;
	// if the thread id is less than the number of elements to process
if (tid < n)
// Add them together
b[tid] += a[tid];
// Notice there is no return statement
}
int main(void)
{
// Calculate memory size
int memSize = N*sizeof(int);
// Initialize host (CPU) memory
int *h_a, *h_b;
h_a = (int*)malloc(memSize);
h_b = (int*)malloc(memSize);
// Initialize device (GPU) memory
int *d_a, *d_b;
cudaMalloc((void**)&d_a, memSize);
cudaMalloc((void**)&d_b, memSize);
// Add some values to host arrays a and b to sum.
for (int i = 0; i < N; i++) {
h_a[i] = i;
h_b[i] = i*i;
}
// Send host (CPU) memory to device (GPU)
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
// Run function add() on device (GPU)
add<<<Blocks, Threads>>>(d_a, d_b, N);
// Make sure all threads on GPU finish
cudaThreadSynchronize();
// Send device (GPU) memory back to host(CPU)
cudaMemcpy(h_b, d_b, memSize, cudaMemcpyDeviceToHost);
// Print output from device (GPU)
for (int i = 0; i < N; i++)
cout << h_b[i] << "\n";
// Free host (CPU) memory
free(h_a);
free(h_b);
// Free device (GPU) memory
cudaFree(d_a);
cudaFree(d_b);
// Exit with success!
	return 0;
}
|
e95da4bbeca6dc9a96c336faddaec1af622b02fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx SystemML.cu
***********************************/
#include <cfloat>
// dim => rlen (Assumption: rlen == clen)
// N = length of dense array
extern "C"
__global__ void copyUpperToLowerTriangleDense(double* ret, int dim, int N) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int id_dest = iy * dim + ix;
if(iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = ix * dim + iy;
ret[id_dest] = ret[id_src];
}
}
extern "C"
__device__ double getBoolean(int val) {
if(val == 0)
return 0.0;
else
return 1.0;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=log}
extern "C"
__device__ double binaryOp(double x, double y, int op) {
// 0=plus, 1=minus, 2=multiply, 3=divide, 4=power
if(op == 0)
return x + y;
else if(op == 1)
return x - y;
else if(op == 2)
return x * y;
else if(op == 3)
return x / y;
else if(op == 4)
return pow(x, y);
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
else if(op == 5)
return getBoolean(x < y);
else if(op == 6)
return getBoolean(x <= y);
else if(op == 7)
return getBoolean(x > y);
else if(op == 8)
return getBoolean(x >= y);
else if(op == 9)
return getBoolean(x == y);
else if(op == 10)
return getBoolean(x != y);
// 11=min, 12=max, 13=and, 14=or, 15=log
else if(op == 11) {
return min(x, y);
}
else if(op == 12) {
return max(x, y);
}
return -999;
}
extern "C"
__global__ void dense_matrix_set(double* A, double scalar, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(index < rlen*clen) {
A[index] = scalar;
}
}
extern "C"
__global__ void dense_matrix_copy(double* A, double* ret, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
ret[index] = A[index];
}
}
extern "C"
__global__ void relu(double* A, double* ret, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = max(0.0, A[index]);
}
}
// Compares each value of A against compareVal (within tol) and sets the corresponding output
extern "C"
__global__ void compareAndSet(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
if(abs(A[index]-compareVal) < tol)
ret[index] = ifEqualsVal;
else if(A[index] < compareVal)
ret[index] = ifLessThanVal;
else
ret[index] = ifGreaterThanVal;
}
}
extern "C"
__global__ void binCellOp(double* A, double* B, double* C,
int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if(vectorAStatus == 1)
aIndex = ix; // clen == 1
else if(vectorAStatus == 2)
aIndex = iy; // rlen == 1
if(vectorBStatus == 1)
bIndex = ix; // clen == 1
else if(vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
// printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
}
}
extern "C"
__global__ void binCellScalarOp(double* A, double scalar, double* C, int rlenA, int clenA, int op, int isLeftScalar) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clenA + iy;
if(index < rlenA*clenA) {
if(isLeftScalar)
C[index] = binaryOp(scalar, A[index], op);
else
C[index] = binaryOp(A[index], scalar, op);
}
}
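// --- Illustrative usage sketch (editor's addition, not part of the original SystemML file) ---
// The op-code table above binaryOp (0=plus, 1=minus, ...) is what the host passes into the
// cell-wise kernels. The helper below is only a guess at one safe way to drive binCellScalarOp
// for op=0 (element-wise A + scalar); the name launchScalarPlusSketch and the launch shape are
// assumptions, not SystemML's actual host code.
extern "C"
void launchScalarPlusSketch(double* d_A, double scalar, double* d_C, int rlen, int clen) {
	// x-dimension strides over rows (overshoot is rejected by the kernel's index bound),
	// y-dimension supplies exactly one thread per column so indices never alias across rows
	dim3 block(256, 1);
	dim3 grid((rlen + block.x - 1) / block.x, clen);
	hipLaunchKernelGGL((binCellScalarOp), grid, block, 0, 0,
	                   d_A, scalar, d_C, rlen, clen, /*op=*/0, /*isLeftScalar=*/0);
}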
/**
* Sets all elements (fills) of a double array of given length with a given scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
extern "C"
__global__ void fill(double* A, double scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA){
A[index] = scalar;
}
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0)
* and the Reduction example available through jcuda.org
* When invoked initially, all blocks partly compute the reduction operation over the entire array
 * and write it to the output/temporary array. A second invocation needs to happen to get the
 * reduced value.
 * The number of threads, blocks and amount of shared memory are calculated in a specific way.
 * Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see
 * how it's done.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the reduction operation
*/
template <typename ReductionOp>
__device__ void reduce(
double *g_idata, ///< input data stored in device memory (of size n)
	double *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
double initialValue) ///< initial value for the reduction variable
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
double v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n)
v = reduction_op(v, g_idata[i+blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
/**
* Does a reduce (sum) over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a single row.
 * The maximum number of blocks that can be launched (as of compute capability 3.0) is 2^31 - 1.
 * This works out fine for SystemML, since the maximum number of elements in a Java array can be 2^31 - c (some small constant).
 * If the matrix is "fat" and "short", i.e. there is a small number of rows and a large number of columns,
* there could be under-utilization of the hardware.
* @param g_idata input matrix stored in device memory
* @param g_odata output vector of size [rows * 1] in device memory
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols)
{
extern __shared__ double sdata[];
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
double v = 0;
while (i < cols){
v += g_idata[block_offset + i];
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = v + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = v + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = v + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = v + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = v = v + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = v = v + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = v = v + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = v = v + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = v = v + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[block] = sdata[0];
}
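// --- Illustrative launch sketch (editor's addition, not part of the original SystemML file) ---
// As the comment above says, reduce_row expects exactly one block per row. This helper only
// sketches the host-side call under assumed sizing (a 128-thread block and
// blockDim.x * sizeof(double) of dynamic shared memory); the name launchReduceRowSketch is
// hypothetical, not SystemML's actual driver code.
extern "C"
void launchReduceRowSketch(double* d_in, double* d_rowSums, unsigned int rows, unsigned int cols) {
	const unsigned int threads = 128;        // power of two, matches the reduction tree above
	size_t smem = threads * sizeof(double);  // one partial-sum slot per thread
	hipLaunchKernelGGL((reduce_row), dim3(rows), dim3(threads), smem, 0,
	                   d_in, d_rowSums, rows, cols);
}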
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
 * This of course leads to under-utilization of the GPU resources.
 * For cases where the number of columns is small, there can be unused SMs.
* @param g_idata input matrix stored in device memory
* @param g_odata output vector of size [1 * cols] in device memory
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols)
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
double val = 0;
while (i < rows * cols) {
val += g_idata[i];
i += grid_size;
}
g_odata[global_tid] = val;
}
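// --- Illustrative launch sketch (editor's addition, not part of the original SystemML file) ---
// reduce_col needs one global thread per column, as described above, and uses no shared memory.
// The grid sizing and the name launchReduceColSketch below are assumptions.
extern "C"
void launchReduceColSketch(double* d_in, double* d_colSums, unsigned int rows, unsigned int cols) {
	const unsigned int threads = 256;
	unsigned int blocks = (cols + threads - 1) / threads;  // enough threads to cover every column
	hipLaunchKernelGGL((reduce_col), dim3(blocks), dim3(threads), 0, 0,
	                   d_in, d_colSums, rows, cols);
}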
/**
* Functor op for summation operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a + b;
}
} SumOp;
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){
SumOp op;
reduce<SumOp>(g_idata, g_odata, n, op, 0.0);
}
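// --- Illustrative two-pass sketch (editor's addition, not part of the original SystemML file) ---
// The long comment above reduce() notes that thread/block/shared-memory sizing is computed on
// the SystemML host side and that a second invocation is needed to finish the reduction. The
// helper below only sketches one plausible way to drive reduce_sum from C++ (assumes n > 0):
// the name hostReduceSumSketch, the fixed 256-thread blocks and the requirement that d_tmp
// holds at least one double per first-pass block are all assumptions.
extern "C"
double hostReduceSumSketch(double* d_in, double* d_tmp, unsigned int n) {
	const unsigned int threads = 256;
	unsigned int blocks = (n + threads * 2 - 1) / (threads * 2);  // each thread folds in 2+ elements
	size_t smem = threads * sizeof(double);
	// pass 1: every block writes its partial sum into d_tmp[blockIdx.x]
	hipLaunchKernelGGL((reduce_sum), dim3(blocks), dim3(threads), smem, 0, d_in, d_tmp, n);
	// pass 2: a single block folds the per-block partials down to d_tmp[0]
	hipLaunchKernelGGL((reduce_sum), dim3(1), dim3(threads), smem, 0, d_tmp, d_tmp, blocks);
	double result = 0.0;
	hipMemcpy(&result, d_tmp, sizeof(double), hipMemcpyDeviceToHost);
	return result;
}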
/**
* Functor op for max operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmax(a, b);
}
} MaxOp;
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){
MaxOp op;
reduce<MaxOp>(g_idata, g_odata, n, op, DBL_MIN);
}
/**
* Functor op for min operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmin(a, b);
}
} MinOp;
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){
MinOp op;
reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX);
}
| e95da4bbeca6dc9a96c336faddaec1af622b02fa.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx SystemML.cu
***********************************/
#include <cfloat>
// dim => rlen (Assumption: rlen == clen)
// N = length of dense array
extern "C"
__global__ void copyUpperToLowerTriangleDense(double* ret, int dim, int N) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int id_dest = iy * dim + ix;
if(iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = ix * dim + iy;
ret[id_dest] = ret[id_src];
}
}
extern "C"
__device__ double getBoolean(int val) {
if(val == 0)
return 0.0;
else
return 1.0;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=log}
extern "C"
__device__ double binaryOp(double x, double y, int op) {
// 0=plus, 1=minus, 2=multiply, 3=divide, 4=power
if(op == 0)
return x + y;
else if(op == 1)
return x - y;
else if(op == 2)
return x * y;
else if(op == 3)
return x / y;
else if(op == 4)
return pow(x, y);
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
else if(op == 5)
return getBoolean(x < y);
else if(op == 6)
return getBoolean(x <= y);
else if(op == 7)
return getBoolean(x > y);
else if(op == 8)
return getBoolean(x >= y);
else if(op == 9)
return getBoolean(x == y);
else if(op == 10)
return getBoolean(x != y);
// 11=min, 12=max, 13=and, 14=or, 15=log
else if(op == 11) {
return min(x, y);
}
else if(op == 12) {
return max(x, y);
}
return -999;
}
extern "C"
__global__ void dense_matrix_set(double* A, double scalar, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(index < rlen*clen) {
A[index] = scalar;
}
}
extern "C"
__global__ void dense_matrix_copy(double* A, double* ret, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
ret[index] = A[index];
}
}
extern "C"
__global__ void relu(double* A, double* ret, int rlen, int clen) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = max(0.0, A[index]);
}
}
// Compares each value of A against compareVal (within tol) and sets the corresponding output
extern "C"
__global__ void compareAndSet(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
if(abs(A[index]-compareVal) < tol)
ret[index] = ifEqualsVal;
else if(A[index] < compareVal)
ret[index] = ifLessThanVal;
else
ret[index] = ifGreaterThanVal;
}
}
extern "C"
__global__ void binCellOp(double* A, double* B, double* C,
int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if(ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if(vectorAStatus == 1)
aIndex = ix; // clen == 1
else if(vectorAStatus == 2)
aIndex = iy; // rlen == 1
if(vectorBStatus == 1)
bIndex = ix; // clen == 1
else if(vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
// printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
}
}
extern "C"
__global__ void binCellScalarOp(double* A, double scalar, double* C, int rlenA, int clenA, int op, int isLeftScalar) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int index = ix * clenA + iy;
if(index < rlenA*clenA) {
if(isLeftScalar)
C[index] = binaryOp(scalar, A[index], op);
else
C[index] = binaryOp(A[index], scalar, op);
}
}
/**
* Sets all elements (fills) of a double array of given length with a given scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
extern "C"
__global__ void fill(double* A, double scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA){
A[index] = scalar;
}
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0)
* and the Reduction example available through jcuda.org
* When invoked initially, all blocks partly compute the reduction operation over the entire array
 * and write it to the output/temporary array. A second invocation needs to happen to get the
 * reduced value.
 * The number of threads, blocks and amount of shared memory are calculated in a specific way.
 * Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see
 * how it's done.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the reduction operation
*/
template <typename ReductionOp>
__device__ void reduce(
double *g_idata, ///< input data stored in device memory (of size n)
	double *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
double initialValue) ///< initial value for the reduction variable
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
double v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n)
v = reduction_op(v, g_idata[i+blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
/**
* Does a reduce (sum) over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a single row.
 * The maximum number of blocks that can be launched (as of compute capability 3.0) is 2^31 - 1
 * This works out fine for SystemML, since the maximum number of elements in a Java array is 2^31 - c (for some small constant c)
 * If the matrix is "fat" and "short", i.e. there is a small number of rows and a large number of columns,
* there could be under-utilization of the hardware.
* @param g_idata input matrix stored in device memory
* @param g_odata output vector of size [rows * 1] in device memory
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols)
{
extern __shared__ double sdata[];
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
double v = 0;
while (i < cols){
v += g_idata[block_offset + i];
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = v + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = v + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = v + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = v + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = v = v + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = v = v + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = v = v + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = v = v + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = v = v + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[block] = sdata[0];
}
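/**
 * Illustrative host-side launch sketch (not part of the original SystemML file): as described
 * above, reduce_row is launched with one block per row. The hypothetical helper name, the thread
 * count (a power of two) and the matching shared-memory size are assumptions for the example.
 */
static void reduce_row_host_sketch(double *d_in, double *d_out, unsigned int rows, unsigned int cols) {
	const unsigned int threads = 256;
	// one block per row; each block needs one double of shared memory per thread
	reduce_row<<<rows, threads, threads * sizeof(double)>>>(d_in, d_out, rows, cols);
}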
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
 * This of course leads to an under-utilization of the GPU resources.
 * For cases where the number of columns is small, there can be unused SMs
* @param g_idata input matrix stored in device memory
* @param g_odata output vector of size [1 * cols] in device memory
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols)
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
double val = 0;
while (i < rows * cols) {
val += g_idata[i];
i += grid_size;
}
g_odata[global_tid] = val;
}
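/**
 * Illustrative host-side launch sketch (not part of the original SystemML file): as described
 * above, reduce_col uses one global thread per column, so the grid covers ceil(cols / threads)
 * blocks. The hypothetical helper name and the thread count are assumptions for the example.
 */
static void reduce_col_host_sketch(double *d_in, double *d_out, unsigned int rows, unsigned int cols) {
	const unsigned int threads = 256;
	const unsigned int blocks = (cols + threads - 1) / threads;
	reduce_col<<<blocks, threads>>>(d_in, d_out, rows, cols);
}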
/**
* Functor op for summation operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a + b;
}
} SumOp;
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){
SumOp op;
reduce<SumOp>(g_idata, g_odata, n, op, 0.0);
}
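/**
 * Illustrative host-side sketch (not part of the original SystemML file): the hypothetical helper
 * below shows the two-pass invocation pattern described in the reduce() documentation above. The
 * thread count, the block cap and the shared-memory size are simplifying assumptions; the real
 * caller derives them from the device properties and the input size.
 */
static void reduce_sum_host_sketch(double *d_in, double *d_tmp, double *d_out, unsigned int n) {
	const unsigned int threads = 512;
	unsigned int blocks = (n + threads * 2 - 1) / (threads * 2);
	if (blocks > 1024) blocks = 1024;	// cap assumed for the example
	const size_t shmem = threads * sizeof(double);
	// first pass: every block writes one partial sum into d_tmp
	reduce_sum<<<blocks, threads, shmem>>>(d_in, d_tmp, n);
	// second pass: a single block folds the partial sums into d_out[0]
	reduce_sum<<<1, threads, shmem>>>(d_tmp, d_out, blocks);
}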
/**
* Functor op for max operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmax(a, b);
}
} MaxOp;
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){
MaxOp op;
	reduce<MaxOp>(g_idata, g_odata, n, op, -DBL_MAX);  // identity for max: the most negative finite double (DBL_MIN is the smallest positive double)
}
/**
* Functor op for min operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmin(a, b);
}
} MinOp;
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){
MinOp op;
reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX);
}
|
00a65bd0896a4612df04cafb7af35acb433a7a34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> c, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
#define num_threads 256
__global__ void
cgemv_conjv_kernel(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
magmaFloatComplex res = MAGMA_C_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_C_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
CGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
    beta    COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
    dy      COMPLEX array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_cblas2
********************************************************************/
extern "C" void
magmablas_cgemv_conjv(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
hipLaunchKernelGGL(( cgemv_conjv_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
#undef num_threads
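/*
    Illustrative call sketch (not part of the original MAGMA file). The hypothetical wrapper below
    assumes the device arrays dA (m-by-n with leading dimension ldda), dx (length n) and dy
    (length m) are allocated and filled elsewhere; unit increments and the alpha/beta values are
    arbitrary choices for the example.
*/
void
example_magmablas_cgemv_conjv_call(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    magmaFloatComplex_const_ptr dx,
    magmaFloatComplex_ptr dy)
{
    magmaFloatComplex alpha = MAGMA_C_ONE;
    magmaFloatComplex beta  = MAGMA_C_ZERO;
    // y := alpha*A*conj(x) + beta*y with incx = incy = 1
    magmablas_cgemv_conjv( m, n, alpha, dA, ldda, dx, 1, beta, dy, 1 );
}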
| 00a65bd0896a4612df04cafb7af35acb433a7a34.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> c, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
#define num_threads 256
__global__ void
cgemv_conjv_kernel(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
magmaFloatComplex res = MAGMA_C_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_C_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
CGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
    beta    COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
    dy      COMPLEX array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_cblas2
********************************************************************/
extern "C" void
magmablas_cgemv_conjv(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
cgemv_conjv_kernel<<< grid, threads, 0, magma_stream >>>
(m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
#undef num_threads
|
f73fa63cf259ff5a30af743b5dc0d45109c3b7e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KMeansGPU.h"
#include "CommonKernels.h"
#include "../common/Utils.h"
#include <cfloat>
std::vector<unsigned> performKMeansOnGPU(float * data_device, unsigned number_of_rows, unsigned number_of_columns,
unsigned number_of_clusters, const std::vector<unsigned>& initial_centroids,
unsigned number_of_iterations) {
// perform device query - get number of SMs, execution configuration
unsigned number_of_multiprocessors = getNumberOfStreamingMultiprocessors();
unsigned block_size = 512;
unsigned number_of_blocks = number_of_multiprocessors * 2;
std::cout << "[DEVICE] Execution configuration for KMeans: (blocks: " << number_of_blocks << ", threads: " << block_size << ")" << std::endl;
// create time-measurement events
hipEvent_t time_start, time_stop;
handleCudaErrors( hipEventCreate(&time_start) );
handleCudaErrors( hipEventCreate(&time_stop) );
// record the starting event
handleCudaErrors( hipEventRecord(time_start) );
// allocate memory for centroids on GPU
float * centroids_device = nullptr;
unsigned centroids_memory_size = number_of_clusters * number_of_columns * sizeof(float);
handleCudaErrors( hipMalloc(¢roids_device, centroids_memory_size) );
// copy initial centroids data using given indexes
for(unsigned i = 0; i < number_of_clusters; ++i) {
handleCudaErrors( hipMemcpy(¢roids_device[i * number_of_columns], &data_device[initial_centroids.at(i) * number_of_columns], number_of_columns * sizeof(float), hipMemcpyDeviceToDevice) );
}
// allocate memory for clusters count on GPU, do not initialize (will do it in each iteration)
unsigned * clusters_count_device = nullptr;
handleCudaErrors( hipMalloc(&clusters_count_device, number_of_clusters * sizeof(unsigned)) );
// allocate memory for closest cluster index on device, initialize to 0 (in case no iterations are performed)
unsigned * closest_clusters_device = nullptr;
handleCudaErrors( hipMalloc(&closest_clusters_device, number_of_rows * sizeof(unsigned)) );
handleCudaErrors( hipMemset(closest_clusters_device, 0, number_of_rows * sizeof(unsigned)));
// allocate memory for new centroids on GPU, do not initialize (no need)
float * clusters_coordinates_sums = nullptr;
handleCudaErrors( hipMalloc(&clusters_coordinates_sums, centroids_memory_size) );
// create special array of floats initialized to 0, that will be used to set centroids memory to zero
// in each iteration using hipMemset
float * zero_filled_memory = nullptr;
handleCudaErrors( hipMalloc(&zero_filled_memory, centroids_memory_size) );
hipLaunchKernelGGL(( zeroInitializeFloatMemory), dim3(1), dim3(1), 0, 0, zero_filled_memory, number_of_clusters * number_of_columns);
handleCudaErrors( hipGetLastError() );
handleCudaErrors( hipDeviceSynchronize() );
	// perform the requested number of iterations
for(unsigned iteration = 0; iteration < number_of_iterations; ++iteration)
{
// compute distances from each point to each cluster, assign closest cluster to each point
hipLaunchKernelGGL(( computeDistancesAndGetClosestClusters), dim3(number_of_blocks), dim3(block_size), 0, 0, data_device, centroids_device,
number_of_rows, number_of_columns,
number_of_clusters, closest_clusters_device);
handleCudaErrors( hipGetLastError() );
handleCudaErrors( hipDeviceSynchronize() );
// zero-initialize cluster coordinates sums and cluster count arrays (there will be addition performed there)
handleCudaErrors( hipMemcpy(clusters_coordinates_sums, zero_filled_memory, centroids_memory_size, hipMemcpyDeviceToDevice) );
handleCudaErrors( hipMemset(clusters_count_device, 0, number_of_clusters * sizeof(unsigned)) );
// sum coordinates of each centroid and count number of points for each centroid
hipLaunchKernelGGL(( computeClustersCoordinatesSums), dim3(number_of_blocks), dim3(block_size), 0, 0, data_device, clusters_coordinates_sums,
clusters_count_device, closest_clusters_device,
number_of_rows, number_of_columns);
handleCudaErrors( hipGetLastError() );
handleCudaErrors( hipDeviceSynchronize() );
// compute new centroids using sums acquired in previous call
hipLaunchKernelGGL(( computeNewCentroids), dim3(number_of_blocks), dim3(block_size), 0, 0, centroids_device, clusters_coordinates_sums,
clusters_count_device,
number_of_columns, number_of_clusters);
handleCudaErrors( hipGetLastError() );
handleCudaErrors( hipDeviceSynchronize() );
}
// move clusters assignment to host
std::vector<unsigned> clusters_assignment(number_of_rows);
handleCudaErrors( hipMemcpy(clusters_assignment.data(), closest_clusters_device, number_of_rows * sizeof(unsigned), hipMemcpyDeviceToHost) );
// record the stopping event and synchronize
handleCudaErrors( hipEventRecord(time_stop) );
handleCudaErrors( hipEventSynchronize(time_stop) );
// compute and display elapsed time
float elapsed_time = 0.0f;
handleCudaErrors( hipEventElapsedTime(&elapsed_time, time_start, time_stop) );
std::cout << "[DEVICE] KMeans - total elapsed time according to CUDA events: " << elapsed_time << " ms" << std::endl;
// destroy the event objects
handleCudaErrors( hipEventDestroy(time_start) );
handleCudaErrors( hipEventDestroy(time_stop) );
// free GPU memory
handleCudaErrors( hipFree(closest_clusters_device) );
handleCudaErrors( hipFree(centroids_device) );
// return vector with clusters numbers assigned to each dataset entry
return clusters_assignment;
}
__global__ void computeDistancesAndGetClosestClusters(float * data, float * centroids,
unsigned input_size, unsigned row_size,
unsigned number_of_centroids,
unsigned * closest_clusters) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
// grid-stride loop
for(unsigned i = start; i < input_size; i += stride) {
unsigned data_index = i * row_size;
float smallest_distance = FLT_MAX;
unsigned closest_cluster = 0;
// iterate over each centroid
for(unsigned centroid = 0; centroid < number_of_centroids; ++centroid) {
float distance = 0.0f;
// iterate over each column in dataset
for(unsigned column = 0; column < row_size; ++column) {
distance += powf(centroids[centroid * row_size + column] - data[data_index + column], 2.0f);
}
distance = sqrt(distance);
if(distance < smallest_distance) {
smallest_distance = distance;
closest_cluster = centroid;
}
}
closest_clusters[i] = closest_cluster;
}
}
__global__ void computeClustersCoordinatesSums(float * data, float * new_centroids_sums, unsigned * cluster_count,
const unsigned * closest_clusters,
unsigned number_of_rows, unsigned number_of_columns) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
for(unsigned i = start; i < number_of_rows; i += stride) {
unsigned cluster_index = closest_clusters[i];
unsigned data_index = i * number_of_columns;
for(unsigned column = 0; column < number_of_columns; ++column) {
atomicAdd(&new_centroids_sums[cluster_index * number_of_columns + column], data[data_index + column]);
}
atomicAdd(&cluster_count[cluster_index], 1);
}
}
__global__ void computeNewCentroids(float * centroids, const float * new_centroids_sums,
const unsigned * cluster_count,
unsigned number_of_columns, unsigned number_of_clusters) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
for(unsigned i = start; i < number_of_clusters; i += stride) {
unsigned index = i * number_of_columns;
for(unsigned column = 0; column < number_of_columns; ++column) {
if(cluster_count[i] != 0) {
centroids[index + column] = new_centroids_sums[index + column] / static_cast<float>(cluster_count[i]);
} // else no change, because the cluster is empty
}
}
}
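/*
 * Illustrative call sketch (not part of the original file): the hypothetical helper below copies a
 * row-major host dataset (rows x columns) to the GPU and runs the KMeans routine above on it. The
 * initial centroid indexes and the iteration count are arbitrary example values.
 */
std::vector<unsigned> exampleKMeansCall(const std::vector<float>& host_data,
                                        unsigned rows, unsigned columns) {
	float * data_device = nullptr;
	handleCudaErrors( hipMalloc(&data_device, rows * columns * sizeof(float)) );
	handleCudaErrors( hipMemcpy(data_device, host_data.data(), rows * columns * sizeof(float), hipMemcpyHostToDevice) );
	std::vector<unsigned> initial_centroids = { 0, 1, 2 }; // row indexes used as the starting centroids
	std::vector<unsigned> assignment = performKMeansOnGPU(data_device, rows, columns,
	                                                      static_cast<unsigned>(initial_centroids.size()),
	                                                      initial_centroids, 20);
	handleCudaErrors( hipFree(data_device) );
	return assignment;
}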
| f73fa63cf259ff5a30af743b5dc0d45109c3b7e2.cu | #include "KMeansGPU.h"
#include "CommonKernels.h"
#include "../common/Utils.h"
#include <cfloat>
std::vector<unsigned> performKMeansOnGPU(float * data_device, unsigned number_of_rows, unsigned number_of_columns,
unsigned number_of_clusters, const std::vector<unsigned>& initial_centroids,
unsigned number_of_iterations) {
// perform device query - get number of SMs, execution configuration
unsigned number_of_multiprocessors = getNumberOfStreamingMultiprocessors();
unsigned block_size = 512;
unsigned number_of_blocks = number_of_multiprocessors * 2;
std::cout << "[DEVICE] Execution configuration for KMeans: (blocks: " << number_of_blocks << ", threads: " << block_size << ")" << std::endl;
// create time-measurement events
cudaEvent_t time_start, time_stop;
handleCudaErrors( cudaEventCreate(&time_start) );
handleCudaErrors( cudaEventCreate(&time_stop) );
// record the starting event
handleCudaErrors( cudaEventRecord(time_start) );
// allocate memory for centroids on GPU
float * centroids_device = nullptr;
unsigned centroids_memory_size = number_of_clusters * number_of_columns * sizeof(float);
handleCudaErrors( cudaMalloc(¢roids_device, centroids_memory_size) );
// copy initial centroids data using given indexes
for(unsigned i = 0; i < number_of_clusters; ++i) {
handleCudaErrors( cudaMemcpy(¢roids_device[i * number_of_columns], &data_device[initial_centroids.at(i) * number_of_columns], number_of_columns * sizeof(float), cudaMemcpyDeviceToDevice) );
}
// allocate memory for clusters count on GPU, do not initialize (will do it in each iteration)
unsigned * clusters_count_device = nullptr;
handleCudaErrors( cudaMalloc(&clusters_count_device, number_of_clusters * sizeof(unsigned)) );
// allocate memory for closest cluster index on device, initialize to 0 (in case no iterations are performed)
unsigned * closest_clusters_device = nullptr;
handleCudaErrors( cudaMalloc(&closest_clusters_device, number_of_rows * sizeof(unsigned)) );
handleCudaErrors( cudaMemset(closest_clusters_device, 0, number_of_rows * sizeof(unsigned)));
// allocate memory for new centroids on GPU, do not initialize (no need)
float * clusters_coordinates_sums = nullptr;
handleCudaErrors( cudaMalloc(&clusters_coordinates_sums, centroids_memory_size) );
// create special array of floats initialized to 0, that will be used to set centroids memory to zero
// in each iteration using cudaMemset
float * zero_filled_memory = nullptr;
handleCudaErrors( cudaMalloc(&zero_filled_memory, centroids_memory_size) );
zeroInitializeFloatMemory<<<1, 1>>>(zero_filled_memory, number_of_clusters * number_of_columns);
handleCudaErrors( cudaGetLastError() );
handleCudaErrors( cudaDeviceSynchronize() );
	// perform the requested number of iterations
for(unsigned iteration = 0; iteration < number_of_iterations; ++iteration)
{
// compute distances from each point to each cluster, assign closest cluster to each point
computeDistancesAndGetClosestClusters<<<number_of_blocks, block_size>>>(data_device, centroids_device,
number_of_rows, number_of_columns,
number_of_clusters, closest_clusters_device);
handleCudaErrors( cudaGetLastError() );
handleCudaErrors( cudaDeviceSynchronize() );
// zero-initialize cluster coordinates sums and cluster count arrays (there will be addition performed there)
handleCudaErrors( cudaMemcpy(clusters_coordinates_sums, zero_filled_memory, centroids_memory_size, cudaMemcpyDeviceToDevice) );
handleCudaErrors( cudaMemset(clusters_count_device, 0, number_of_clusters * sizeof(unsigned)) );
// sum coordinates of each centroid and count number of points for each centroid
computeClustersCoordinatesSums<<<number_of_blocks, block_size>>>(data_device, clusters_coordinates_sums,
clusters_count_device, closest_clusters_device,
number_of_rows, number_of_columns);
handleCudaErrors( cudaGetLastError() );
handleCudaErrors( cudaDeviceSynchronize() );
// compute new centroids using sums acquired in previous call
computeNewCentroids<<<number_of_blocks, block_size>>>(centroids_device, clusters_coordinates_sums,
clusters_count_device,
number_of_columns, number_of_clusters);
handleCudaErrors( cudaGetLastError() );
handleCudaErrors( cudaDeviceSynchronize() );
}
// move clusters assignment to host
std::vector<unsigned> clusters_assignment(number_of_rows);
handleCudaErrors( cudaMemcpy(clusters_assignment.data(), closest_clusters_device, number_of_rows * sizeof(unsigned), cudaMemcpyDeviceToHost) );
// record the stopping event and synchronize
handleCudaErrors( cudaEventRecord(time_stop) );
handleCudaErrors( cudaEventSynchronize(time_stop) );
// compute and display elapsed time
float elapsed_time = 0.0f;
handleCudaErrors( cudaEventElapsedTime(&elapsed_time, time_start, time_stop) );
std::cout << "[DEVICE] KMeans - total elapsed time according to CUDA events: " << elapsed_time << " ms" << std::endl;
// destroy the event objects
handleCudaErrors( cudaEventDestroy(time_start) );
handleCudaErrors( cudaEventDestroy(time_stop) );
// free GPU memory
handleCudaErrors( cudaFree(closest_clusters_device) );
handleCudaErrors( cudaFree(centroids_device) );
// return vector with clusters numbers assigned to each dataset entry
return clusters_assignment;
}
__global__ void computeDistancesAndGetClosestClusters(float * data, float * centroids,
unsigned input_size, unsigned row_size,
unsigned number_of_centroids,
unsigned * closest_clusters) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
// grid-stride loop
for(unsigned i = start; i < input_size; i += stride) {
unsigned data_index = i * row_size;
float smallest_distance = FLT_MAX;
unsigned closest_cluster = 0;
// iterate over each centroid
for(unsigned centroid = 0; centroid < number_of_centroids; ++centroid) {
float distance = 0.0f;
// iterate over each column in dataset
for(unsigned column = 0; column < row_size; ++column) {
distance += powf(centroids[centroid * row_size + column] - data[data_index + column], 2.0f);
}
distance = sqrt(distance);
if(distance < smallest_distance) {
smallest_distance = distance;
closest_cluster = centroid;
}
}
closest_clusters[i] = closest_cluster;
}
}
__global__ void computeClustersCoordinatesSums(float * data, float * new_centroids_sums, unsigned * cluster_count,
const unsigned * closest_clusters,
unsigned number_of_rows, unsigned number_of_columns) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
for(unsigned i = start; i < number_of_rows; i += stride) {
unsigned cluster_index = closest_clusters[i];
unsigned data_index = i * number_of_columns;
for(unsigned column = 0; column < number_of_columns; ++column) {
atomicAdd(&new_centroids_sums[cluster_index * number_of_columns + column], data[data_index + column]);
}
atomicAdd(&cluster_count[cluster_index], 1);
}
}
__global__ void computeNewCentroids(float * centroids, const float * new_centroids_sums,
const unsigned * cluster_count,
unsigned number_of_columns, unsigned number_of_clusters) {
unsigned start = blockIdx.x * blockDim.x + threadIdx.x;
unsigned stride = blockDim.x * gridDim.x;
for(unsigned i = start; i < number_of_clusters; i += stride) {
unsigned index = i * number_of_columns;
for(unsigned column = 0; column < number_of_columns; ++column) {
if(cluster_count[i] != 0) {
centroids[index + column] = new_centroids_sums[index + column] / static_cast<float>(cluster_count[i]);
} // else no change, because the cluster is empty
}
}
}
|
98bf69d305c01adc068117e0d340e7ade41dc1d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu.h"
//Got directly from "Accelerated Ray Tracing in One Weekend in CUDA" tutorial
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
void allocateGPUSharedMem(void ** ptr, int size){
checkCudaErrors(hipMallocManaged((void **)ptr, size));
}
void allocateGPUMem(void ** ptr, int size){
checkCudaErrors(hipMalloc((void **)ptr , size));
}
void freeGPUMem(void* ptr){
checkCudaErrors(hipFree(ptr));
}
void syncGPU(){
checkCudaErrors(hipDeviceSynchronize());
}
void checkGPUErrors(){
checkCudaErrors(hipGetLastError());
}
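/*
 * Illustrative usage sketch (not part of the original file): the hypothetical function below
 * allocates a managed buffer with the helpers above, waits for any outstanding GPU work, checks
 * for errors and releases the buffer. The element count is an arbitrary example value.
 */
void exampleGpuHelperUsage(){
    float * buffer = nullptr;
    allocateGPUSharedMem((void **)&buffer, 1024 * sizeof(float));
    // ... launch kernels that read/write `buffer` here ...
    syncGPU();
    checkGPUErrors();
    freeGPUMem(buffer);
}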
| 98bf69d305c01adc068117e0d340e7ade41dc1d1.cu | #include "gpu.h"
//Got directly from "Accelerated Ray Tracing in One Weekend in CUDA" tutorial
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
void allocateGPUSharedMem(void ** ptr, int size){
checkCudaErrors(cudaMallocManaged((void **)ptr, size));
}
void allocateGPUMem(void ** ptr, int size){
checkCudaErrors(cudaMalloc((void **)ptr , size));
}
void freeGPUMem(void* ptr){
checkCudaErrors(cudaFree(ptr));
}
void syncGPU(){
checkCudaErrors(cudaDeviceSynchronize());
}
void checkGPUErrors(){
checkCudaErrors(cudaGetLastError());
}
|
b06cff9e435929a4ef026185f1015ecfb620e023.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/random_generator.h"
namespace oneflow {
namespace {
template<typename T>
void RngUniformGpu(const hiprandGenerator_t& gen, int64_t n, T* ret);
template<>
void RngUniformGpu<float>(const hiprandGenerator_t& gen, int64_t n, float* ret) {
CudaCheck(hiprandGenerateUniform(gen, ret, n));
}
template<>
void RngUniformGpu<double>(const hiprandGenerator_t& gen, int64_t n, double* ret) {
CudaCheck(hiprandGenerateUniformDouble(gen, ret, n));
}
} // namespace
RandomGenerator<DeviceType::kGPU>::RandomGenerator(int64_t seed, DeviceCtx* device_ctx) {
CHECK_NOTNULL(device_ctx);
CudaCheck(hiprandCreateGenerator(&curand_generator_, HIPRAND_RNG_PSEUDO_DEFAULT));
CudaCheck(hiprandSetPseudoRandomGeneratorSeed(curand_generator_, seed));
CudaCheck(hiprandSetStream(curand_generator_, device_ctx->cuda_stream()));
}
RandomGenerator<DeviceType::kGPU>::~RandomGenerator() {
CudaCheck(hiprandDestroyGenerator(curand_generator_));
}
template<typename T>
void RandomGenerator<DeviceType::kGPU>::Uniform(const int64_t elem_cnt, T* dptr) {
RngUniformGpu(curand_generator_, elem_cnt, dptr);
}
#define INITIATE_GPU_RANDOM_GENERATOR_UNIFORM(T, typeproto) \
template void RandomGenerator<DeviceType::kGPU>::Uniform<T>(const int64_t elem_cnt, T* dptr);
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_RANDOM_GENERATOR_UNIFORM, FLOATING_DATA_TYPE_SEQ);
} // namespace oneflow
| b06cff9e435929a4ef026185f1015ecfb620e023.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/random_generator.h"
namespace oneflow {
namespace {
template<typename T>
void RngUniformGpu(const curandGenerator_t& gen, int64_t n, T* ret);
template<>
void RngUniformGpu<float>(const curandGenerator_t& gen, int64_t n, float* ret) {
CudaCheck(curandGenerateUniform(gen, ret, n));
}
template<>
void RngUniformGpu<double>(const curandGenerator_t& gen, int64_t n, double* ret) {
CudaCheck(curandGenerateUniformDouble(gen, ret, n));
}
} // namespace
RandomGenerator<DeviceType::kGPU>::RandomGenerator(int64_t seed, DeviceCtx* device_ctx) {
CHECK_NOTNULL(device_ctx);
CudaCheck(curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT));
CudaCheck(curandSetPseudoRandomGeneratorSeed(curand_generator_, seed));
CudaCheck(curandSetStream(curand_generator_, device_ctx->cuda_stream()));
}
RandomGenerator<DeviceType::kGPU>::~RandomGenerator() {
CudaCheck(curandDestroyGenerator(curand_generator_));
}
template<typename T>
void RandomGenerator<DeviceType::kGPU>::Uniform(const int64_t elem_cnt, T* dptr) {
RngUniformGpu(curand_generator_, elem_cnt, dptr);
}
#define INITIATE_GPU_RANDOM_GENERATOR_UNIFORM(T, typeproto) \
template void RandomGenerator<DeviceType::kGPU>::Uniform<T>(const int64_t elem_cnt, T* dptr);
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_RANDOM_GENERATOR_UNIFORM, FLOATING_DATA_TYPE_SEQ);
} // namespace oneflow
|
b75361d2e5d085240952b744c79d99f1c3c8b93b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include <thrust/device_vector.h>
using namespace cv::cuda;
inline __device__ __host__ int iDivUp( int a, int b ) { return (a % b != 0) ? (a / b + 1) : (a / b); }
__global__ void image2Matrix_kernel(int width, int height, PtrStepSz<uchar3> image, float* matrix){
const int w = blockIdx.x;
const int h = blockIdx.y;
float alpha = -127.5;
float beta = 0.0078125;
if (w < width && h < height)
{
uchar3 v = image(h,w);
*(matrix + 0*height*width + h*width + w) = (float(v.z)-alpha)*beta;
*(matrix + 1*height*width + h*width + w) = (float(v.y)-alpha)*beta;
*(matrix + 2*height*width + h*width + w) = (float(v.x)-alpha)*beta;
}
}
__global__ void image2Matrix_with_transpose_kernel(int width, int height, PtrStepSz<uchar3> image, float* matrix){
const int w = blockIdx.x;
const int h = blockIdx.y;
float alpha = -127.5;
float beta = 0.0078125;
if (w < width && h < height)
{
uchar3 v = image(w,h); //swap w and h to transpose
*(matrix + 0*height*width + h*width + w) = (float(v.z)-alpha)*beta;
*(matrix + 1*height*width + h*width + w) = (float(v.y)-alpha)*beta;
*(matrix + 2*height*width + h*width + w) = (float(v.x)-alpha)*beta;
}
}
void gpu_image2Matrix(int width, int height, cv::cuda::GpuMat & image, float* matrix, hipStream_t &stream)
{
/*
image : input image in GpuMat format, WHC arrangement and BGR order
matrix: gpu float array, CHW and RGB order
*/
dim3 block(width, height); // width * height blocks, 1 thread each
hipLaunchKernelGGL(( image2Matrix_kernel), dim3(block),dim3(1),0,stream, width,height,image,matrix);
}
void gpu_image2Matrix_with_transpose(int width, int height, cv::cuda::GpuMat & image, float* matrix, hipStream_t &stream)
{
dim3 block(width, height); // width * height blocks, 1 thread each
hipLaunchKernelGGL(( image2Matrix_with_transpose_kernel), dim3(block),dim3(1),0,stream, width,height,image,matrix);
}
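/*
 * Illustrative usage sketch (not part of the original file): the hypothetical function below
 * uploads a BGR host image to the GPU, allocates the CHW float buffer (3 channels assumed) and
 * converts it with gpu_image2Matrix; the final synchronization and free are also assumptions for
 * the example.
 */
void example_image2Matrix(const cv::Mat & bgr_image, hipStream_t & stream)
{
	cv::cuda::GpuMat gpu_image;
	gpu_image.upload(bgr_image);
	int width = gpu_image.cols;
	int height = gpu_image.rows;
	float * matrix = nullptr;
	hipMalloc((void **)&matrix, 3 * width * height * sizeof(float));
	gpu_image2Matrix(width, height, gpu_image, matrix, stream);
	hipStreamSynchronize(stream);
	hipFree(matrix);
}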
//__global__ void crop_and_resize_kernel(int x1, int y1, int x2, int y2, int PtrStepSz<uchar3> image, int* temp_buffer)
//{
// const int x = blockDim.x*blockIdx.x+threadIdx.x;
// const int y = blockDim.y*blockIdx.y+threadIdx.y;
// if(x>=(x2-x1)||y>=(y2-y1))
// return;
// uchar3 v = image(y,x);
// temp_buffer[]
//}
//
//__global__ void generate_batch_kernel(int crop_size, int width, int height, int * boxes_data, PtrStepSz<uchar3> image, float * output_batch)
//{
// const int box_idx = blockIdx.x*blockDim.x+threadIdx.x;
// if(!boxes_data||!output_batch)
// return hipErrorInvalidDevicePointer;
// if(crop_size==0||width==0||height==0||num==0)
// return hipErrorInvalidValue;
//
// int offset = box_idx*4*sizeof(int);
// //the bbox
// int x1 = int(boxes_data+offset);
// int y1 = int(boxes_data+offset+1);
// int x2 = int(boxes_data+offset+2);
// int y2 = int(boxes_data+offset+3);
//
// // the width and height of area to crop
// int w = x2-x1;
// int h = y2-y1;
//
// // total pixels of area to crop
// int total_pixels = w*h;
//
// // creat a temp buffer to store
// float *temp_buffer = new float[total_pixels*3];
// const dim3 blockDim(8,8);
// const dim3 gridDim(iDivUp(w,blockDim.x),iDivUp(h,blockDim.y));
// crop_kernel<<<gridDim,blockDim>>>(x1,y2,x2,y2,image, temp_buffer);
//}
//
//void boxes2bactch(int num, int crop_size, int width, int height, float * boxes_data, cuda::GpuMat image, float * output_batch, float * hipStream_t& stream)
//{
// generate_batch_kernel<<<num,1,0,stream>>>(crop_size,width, height, boxes_data, image, output_batch);
//}
//__global__ void generatebox_kernel(int width, int height, float * scores, float * location, float pthreshold )
//{
// const int w = blockIdx.x;
// const int h = blockIdx.y;
// if(w<width && h< height)
// {
// float score = *(scores + width*height + w*height+ h );
// if(score > pthreshold)
// {
//
// }
// }
//
//
//}
//void gpu_generatebox(int width , int height , void * score, void * location, float scale, float pthreshold)
//{
//
// int stride = 2;
// int cellsize = 12;
// int count = 0;
// //score p
// void *p = (float*)score + width * height;
// void *plocal = (float*)location;
// struct Bbox bbox;
// struct orderScore order;
// for (int row = 0; row < score->height; row++) {
// for (int col = 0; col < score->width; col++) {
// if (*p > Pthreshold) {
// bbox.score = *p;
// order.score = *p;
// order.oriOrder = count;
// bbox.x1 = round((stride * row + 1) / scale);
// bbox.y1 = round((stride * col + 1) / scale);
// bbox.x2 = round((stride * row + 1 + cellsize) / scale);
// bbox.y2 = round((stride * col + 1 + cellsize) / scale);
// bbox.exist = true;
// bbox.area = (bbox.x2 - bbox.x1) * (bbox.y2 - bbox.y1);
// for (int channel = 0; channel < 4; channel++)
// bbox.regreCoord[channel] = *(plocal + channel * location->width * location->height);
// boundingBox_.push_back(bbox);
// bboxScore_.push_back(order);
// count++;
// }
// p++;
// plocal++;
// }
// }
// dim3 block(width, height);
// generatebox_kernel(width, height, score, location, pthreshold);
//}
| b75361d2e5d085240952b744c79d99f1c3c8b93b.cu | #include "kernels.h"
#include <thrust/device_vector.h>
using namespace cv::cuda;
inline __device__ __host__ int iDivUp( int a, int b ) { return (a % b != 0) ? (a / b + 1) : (a / b); }
__global__ void image2Matrix_kernel(int width, int height, PtrStepSz<uchar3> image, float* matrix){
const int w = blockIdx.x;
const int h = blockIdx.y;
float alpha = -127.5;
float beta = 0.0078125;
if (w < width && h < height)
{
uchar3 v = image(h,w);
*(matrix + 0*height*width + h*width + w) = (float(v.z)-alpha)*beta;
*(matrix + 1*height*width + h*width + w) = (float(v.y)-alpha)*beta;
*(matrix + 2*height*width + h*width + w) = (float(v.x)-alpha)*beta;
}
}
__global__ void image2Matrix_with_transpose_kernel(int width, int height, PtrStepSz<uchar3> image, float* matrix){
const int w = blockIdx.x;
const int h = blockIdx.y;
float alpha = -127.5;
float beta = 0.0078125;
if (w < width && h < height)
{
uchar3 v = image(w,h); //swap w and h to transpose
*(matrix + 0*height*width + h*width + w) = (float(v.z)-alpha)*beta;
*(matrix + 1*height*width + h*width + w) = (float(v.y)-alpha)*beta;
*(matrix + 2*height*width + h*width + w) = (float(v.x)-alpha)*beta;
}
}
void gpu_image2Matrix(int width, int height, cv::cuda::GpuMat & image, float* matrix, cudaStream_t &stream)
{
/*
image : input image in GpuMat format, WHC arrangement and BGR order
matrix: gpu float array, CHW and RGB order
*/
dim3 block(width, height); // width * height blocks, 1 thread each
image2Matrix_kernel<<<block,1,0,stream>>>(width,height,image,matrix);
}
void gpu_image2Matrix_with_transpose(int width, int height, cv::cuda::GpuMat & image, float* matrix, cudaStream_t &stream)
{
dim3 block(width, height); // width * height blocks, 1 thread each
image2Matrix_with_transpose_kernel<<<block,1,0,stream>>>(width,height,image,matrix);
}
//__global__ void crop_and_resize_kernel(int x1, int y1, int x2, int y2, int PtrStepSz<uchar3> image, int* temp_buffer)
//{
// const int x = blockDim.x*blockIdx.x+threadIdx.x;
// const int y = blockDim.y*blockIdx.y+threadIdx.y;
// if(x>=(x2-x1)||y>=(y2-y1))
// return;
// uchar3 v = image(y,x);
// temp_buffer[]
//}
//
//__global__ void generate_batch_kernel(int crop_size, int width, int height, int * boxes_data, PtrStepSz<uchar3> image, float * output_batch)
//{
// const int box_idx = blockIdx.x*blockDim.x+threadIdx.x;
// if(!boxes_data||!output_batch)
// return cudaErrorInvalidDevicePointer;
// if(crop_size==0||width==0||height==0||num==0)
// return cudaErrorInvalidValue;
//
// int offset = box_idx*4*sizeof(int);
// //the bbox
// int x1 = int(boxes_data+offset);
// int y1 = int(boxes_data+offset+1);
// int x2 = int(boxes_data+offset+2);
// int y2 = int(boxes_data+offset+3);
//
// // the width and height of area to crop
// int w = x2-x1;
// int h = y2-y1;
//
// // total pixels of area to crop
// int total_pixels = w*h;
//
// // creat a temp buffer to store
// float *temp_buffer = new float[total_pixels*3];
// const dim3 blockDim(8,8);
// const dim3 gridDim(iDivUp(w,blockDim.x),iDivUp(h,blockDim.y));
// crop_kernel<<<gridDim,blockDim>>>(x1,y2,x2,y2,image, temp_buffer);
//}
//
//void boxes2bactch(int num, int crop_size, int width, int height, float * boxes_data, cuda::GpuMat image, float * output_batch, float * cudaStream_t& stream)
//{
// generate_batch_kernel<<<num,1,0,stream>>>(crop_size,width, height, boxes_data, image, output_batch);
//}
//__global__ void generatebox_kernel(int width, int height, float * scores, float * location, float pthreshold )
//{
// const int w = blockIdx.x;
// const int h = blockIdx.y;
// if(w<width && h< height)
// {
// float score = *(scores + width*height + w*height+ h );
// if(score > pthreshold)
// {
//
// }
// }
//
//
//}
//void gpu_generatebox(int width , int height , void * score, void * location, float scale, float pthreshold)
//{
//
// int stride = 2;
// int cellsize = 12;
// int count = 0;
// //score p
// void *p = (float*)score + width * height;
// void *plocal = (float*)location;
// struct Bbox bbox;
// struct orderScore order;
// for (int row = 0; row < score->height; row++) {
// for (int col = 0; col < score->width; col++) {
// if (*p > Pthreshold) {
// bbox.score = *p;
// order.score = *p;
// order.oriOrder = count;
// bbox.x1 = round((stride * row + 1) / scale);
// bbox.y1 = round((stride * col + 1) / scale);
// bbox.x2 = round((stride * row + 1 + cellsize) / scale);
// bbox.y2 = round((stride * col + 1 + cellsize) / scale);
// bbox.exist = true;
// bbox.area = (bbox.x2 - bbox.x1) * (bbox.y2 - bbox.y1);
// for (int channel = 0; channel < 4; channel++)
// bbox.regreCoord[channel] = *(plocal + channel * location->width * location->height);
// boundingBox_.push_back(bbox);
// bboxScore_.push_back(order);
// count++;
// }
// p++;
// plocal++;
// }
// }
// dim3 block(width, height);
// generatebox_kernel(width, height, score, location, pthreshold);
//}
|
27d6f9425acdace3ec40416a4fd2db50591c45ee.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include <mex.h>
#include <matrix.h>
#include <math.h>
#include "../MEX_helpers/complex.h"
#include "../MEX_helpers/cuda_helper.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
long long potential_ptr = mxGetScalar(prhs[0]);
int nx = mxGetScalar(prhs[1]);
int ny = mxGetScalar(prhs[2]);
int nz = mxGetScalar(prhs[3]);
myComplex *dev_potential = reinterpret_cast<myComplex *>(potential_ptr);
myComplex *potential = reinterpret_cast<myComplex *>(malloc(nx * ny * nz * sizeof(myComplex)));
hipMemcpy(potential, dev_potential, nx * ny * nz * sizeof(myComplex), hipMemcpyDeviceToHost);
for (int k=0; k<nz; k++) {
for (int i=0; i<nx; i++) {
for (int j=0; j<ny; j++) {
int idx = k*nx*ny+j*nx+i;
mexPrintf("(%e + i*%e) ", potential[idx].x, potential[idx].y);
}
mexPrintf("\n");
}
mexPrintf("\n");
}
free(potential);
}
| 27d6f9425acdace3ec40416a4fd2db50591c45ee.cu | /* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include <mex.h>
#include <matrix.h>
#include <math.h>
#include "../MEX_helpers/complex.h"
#include "../MEX_helpers/cuda_helper.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
long long potential_ptr = mxGetScalar(prhs[0]);
int nx = mxGetScalar(prhs[1]);
int ny = mxGetScalar(prhs[2]);
int nz = mxGetScalar(prhs[3]);
myComplex *dev_potential = reinterpret_cast<myComplex *>(potential_ptr);
myComplex *potential = reinterpret_cast<myComplex *>(malloc(nx * ny * nz * sizeof(myComplex)));
cudaMemcpy(potential, dev_potential, nx * ny * nz * sizeof(myComplex), cudaMemcpyDeviceToHost);
for (int k=0; k<nz; k++) {
for (int i=0; i<nx; i++) {
for (int j=0; j<ny; j++) {
int idx = k*nx*ny+j*nx+i;
mexPrintf("(%e + i*%e) ", potential[idx].x, potential[idx].y);
}
mexPrintf("\n");
}
mexPrintf("\n");
}
free(potential);
}
|
767eef5eae469d705edd99680ed0cf30d6c77600.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include"pathalg.h"
static const int WORK_SIZE =258;
void Bellmanor::copydata(int s,vector<edge>&edges,int nodenum){
};
void Bellmanor::dellocate(){
};
void Bellmanor::allocate(int maxn,int maxedge){
}
void Bellmanor::topsort()
{
};
void Bellmanor::updatE(vector<vector<int>>&tesigns)
{
esigns=tesigns;
for(int k=0;k<LY;k++)
{
int off=k*pnodesize*mm;
for(int i=0;i<pnodesize;i++)
{
for(int j=0;j<mm;j++)
if(j<rus[i].size())
rudw[off+i*mm+j]=esigns[k][ruw[i][j]];
else
rudw[off+i*mm+j]=-1;
}
}
hipMemcpy(dev_rudw,rudw,mm*LY*pnodesize*sizeof(int),hipMemcpyHostToDevice);
}
void Bellmanor::updatS(vector<vector<Sot>>&stpair)
{
L[0]=0;
L[1]=LY1;
L[2]=LY2;
S[0]=stpair[0].size();
S[1]=stpair[1].size();
stps=stpair;
int count=0;
ncount=L[1]*S[0]+L[2]*S[1];
memset(d,1,ncount*nodenum*sizeof(int));
memset(p,-1,ncount*nodenum*sizeof(int));
for(int k=0;k<L[1];k++)
{
for(int j=0;j<stpair[0].size();j++)
{
d[count*nodenum+stpair[0][j].s]=0;
count++;
}
}
for(int k=0;k<L[2];k++)
{
for(int j=0;j<stpair[1].size();j++)
{
d[count*nodenum+stpair[1][j].s]=0;
count++;
}
}
Size[0]=pnodesize*L[1]*S[0];
Size[1]=pnodesize*L[2]*S[1];
hipMemcpy(dev_d,d,ncount*nodenum*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_p,p,ncount*nodenum*sizeof(int),hipMemcpyHostToDevice);
}
void Bellmanor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,int _nodenum)
{
//cout<<"init bellmanor"<<endl;
nodenum=_nodenum;
pnodesize=nodenum/(NUT);
edges=ext.first;
esigns=ext.second;
stp=stpair;
W=WD+1;
//st=new int[edges.size()*LY];
//te=new int[edges.size()*LY];
d=new int[nodenum*LY*YE];
p=new int[nodenum*LY*YE];
w=new int[edges.size()*LY];
esignes=new int[edges.size()*LY];
vector<vector<int>>nein(pnodesize*LY,vector<int>());
neibn=nein;
vector<vector<int>>neie(pnodesize,vector<int>());
vector<vector<int>>rs(pnodesize,vector<int>());
vector<vector<int>>rw(pnodesize,vector<int>());
rus=rs;
ruw=rw;
for(int i=0;i<edges.size();i++)
{
int s=edges[i].s;
int t=edges[i].t;
rus[t].push_back(s);
ruw[t].push_back(i);
neibn[s].push_back(t);
neie[s].push_back(i);
}
mm=0;
for(int i=0;i<rus.size();i++)
if(rus[i].size()>mm)mm=rus[i].size();
rudu=new int[nodenum*mm*LY];
rudw=new int[nodenum*mm*LY];
rid=new int[nodenum*mm*LY];
for(int k=0;k<LY;k++)
{
int off=k*pnodesize*mm;
for(int i=0;i<pnodesize;i++)
{
for(int j=0;j<mm;j++)
if(j<rus[i].size())
rudu[off+i*mm+j]=rus[i][j];
else
rudu[off+i*mm+j]=INT_MAX;
for(int j=0;j<mm;j++)
if(j<rus[i].size())
{
rudw[off+i*mm+j]=esigns[k][ruw[i][j]];
rid[off+i*mm+j]=ruw[i][j];
}
else
{
rudw[off+i*mm+j]=-1;
rid[off+i*mm+j]=-1;
}
}
}
int count=0;
hipMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int));
hipMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int));
//hipMalloc((void**)&dev_w,LY*edges.size()*sizeof(int));
//hipMalloc((void**)&dev_m1,sizeof(int));
//hipMalloc((void**)&dev_m2,sizeof(int));
hipMalloc((void**)&dev_rudu,mm*LY*pnodesize*sizeof(int));
hipMalloc((void**)&dev_rudw,mm*LY*pnodesize*sizeof(int));
hipMalloc((void**)&dev_rid,mm*LY*pnodesize*sizeof(int));
//hipMalloc((void**)&dev_ruid,mm*LY*nodenum*sizeof(int));
//hipMemcpy(dev_te,te,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(dev_st,st,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(dev_w,w,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_rudu,rudu,mm*LY*pnodesize*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_rudw,rudw,mm*LY*pnodesize*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_rid,rid,mm*LY*pnodesize*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(dev_ruid,ruid,mm*LY*nodenum*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(dev_m1,m1,sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(dev_m2,m2,sizeof(int),hipMemcpyHostToDevice);
};
Bellmanor::Bellmanor():L(PC+1,0),S(PC,0),NF(PC,0),Size(2,0)
{
};
__global__ void bellmandu(int *rudu,int*rudw,int *rid,int *d,int*p,int K,int PN,int size,int sizeoff,int leveloff,int ye,int mm)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>=size)return;
int yep=ye*PN;
int lyy=i/(yep);
int yee=(i%yep)/PN;
int nn=i%PN;
int off=(lyy*yep+yee*PN+sizeoff)*NUT+K*PN;
int roff=(nn+(lyy+leveloff)*PN)*mm;
i=off+nn;
int dm=d[i];
int pm=-1;
for(int k=0;k<mm;k++)
if(rudu[roff+k]<INT_MAX)
{
int node=rudu[roff+k]+off-PN;
if(rudw[roff+k]<0)continue;
if(dm>d[node]+rudw[roff+k])
{
dm=d[node]+rudw[roff+k];
pm=rid[roff+k];
}
}
if(d[i]>dm)
{
d[i]=dm,p[i]=pm;
}
}
vector<vector<Rout>> Bellmanor::routalg(int s,int t,int bw)
{
//cout<<"inbellman"<<endl;
int kk=1;
time_t start,end;
start=clock();
hipStream_t stream0;
hipStreamCreate(&stream0);
hipStream_t stream1;
hipStreamCreate(&stream1);
for(int i=1;i<WD+1;i++)
{
hipLaunchKernelGGL(( bellmandu), dim3(Size[0]/512+1),dim3(512),0,stream0, dev_rudu,dev_rudw,dev_rid,dev_d,dev_p,i,pnodesize,Size[0],0,0,S[0],mm);
hipLaunchKernelGGL(( bellmandu), dim3(Size[1]/512+1),dim3(512),0,stream1, dev_rudu,dev_rudw,dev_rid,dev_d,dev_p,i,pnodesize,Size[1],Size[0],L[1],S[1],mm);
}
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream0);
hipMemcpy(d,dev_d,ncount*nodenum*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(p,dev_p,ncount*nodenum*sizeof(int),hipMemcpyDeviceToHost);
/*for(int i=0;i<ncount;i++)
{
cout<<"********************************************** "<<i<<endl;
for(int k=0;k<WD+1;k++)
{
for(int j=0;j<pnodesize;j++)
cout<<p[i*nodenum+k*pnodesize+j]<<" ";
cout<<endl;
}
}*/
end=clock();
vector<vector<Rout>>result(2,vector<Rout>());
vector<int>LL(3,0);
LL=L;
LL[2]+=LL[1];
int count=0;
for(int y=1;y<PC+1;y++)
for(int k=LL[y-1];k<LL[y];k++)
{
for(int l=0;l<stps[y-1].size();l++)
{
int offf=count*nodenum;
int s=stps[y-1][l].s;
vector<int>ters=stps[y-1][l].ters;
for(int i=0;i<ters.size();i++)
{
int id=stps[y-1][l].mmpid[ters[i]];
int hop=0;
int tt=ters[i];
int min=10000;
int prn=-1;
for(int i=1;i<W;i++)
{
if(d[offf+tt+i*pnodesize]<min)
{
min=d[offf+tt+i*pnodesize];
prn=offf+tt+i*pnodesize;
}
}
int offf=prn-tt;
int offer=offf;
if(prn<0)continue;
Rout S(s,tt,id,min,offf,k);
result[y-1].push_back(S);
}
count++;
}
}
//cout<<"GPU time is : "<<end-start<<endl;
return result;
};
/*
__global__ void bellmanhigh(int *st,int *te,int *d,int *has,int *w,int E,int N,int size,int *m,int round,int Leveloff,int numoff,int ye,int ly)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>size)return;
int eid=(i%(E*ly));
int eeid=eid+Leveloff;
int s=st[eeid],t=te[eeid],weight=w[eeid];
if(weight<0)return;
int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff;
//if(has[s+off]<round-1)return;
if(d[s+off]+weight<d[t+off])
{
d[t+off]=weight+d[s+off];
//has[t+off]=round;
*m=1;
}
}*/
/*__global__ void color(int *st,int *te,int *d,int *pre,int *has,int *w,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>size)return;
int eid=(i%(E*ly));
int eeid=eid+Leveloff;
int s=st[eeid],t=te[eeid],weight=w[eeid];
if(weight<0)return;
int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff;
//if(has[s+off]<round-1)return;
if(d[s+off]+weight==d[t+off])
pre[t+off]=s+off;
}*/
/*m1=1;
*m2=1;
int round=1;
cout<<"fuck wx!"<<endl;
int flag1=0,flag2=0;
int cc=0;
while(*m2==1||*m1==1)
{
*m2=0,*m1=0;
hipMemcpyAsync(dev_m2,m2,sizeof(int),hipMemcpyHostToDevice,stream1);
bellmanhigh<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[1],dev_m2,round,leveloff[1],nodeoff[1],S[1],L[1]);
hipMemcpyAsync(dev_m1,m1,sizeof(int),hipMemcpyHostToDevice,stream0);
bellmanhigh<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[0],dev_m2,round,leveloff[0],nodeoff[0],S[0],L[0]);
color<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[1],round,leveloff[1],nodeoff[1],S[1],L[1]);
hipMemcpyAsync(m2,dev_m2,sizeof(int),hipMemcpyDeviceToHost,stream1);
color<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[0],round,leveloff[0],nodeoff[0],S[0],L[0]);
hipMemcpyAsync(m1,dev_m1,sizeof(int),hipMemcpyDeviceToHost,stream0);
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream0);
}*/
| 767eef5eae469d705edd99680ed0cf30d6c77600.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include"pathalg.h"
static const int WORK_SIZE =258;
void Bellmanor::copydata(int s,vector<edge>&edges,int nodenum){
};
void Bellmanor::dellocate(){
};
void Bellmanor::allocate(int maxn,int maxedge){
}
void Bellmanor::topsort()
{
};
void Bellmanor::updatE(vector<vector<int>>&tesigns)
{
esigns=tesigns;
for(int k=0;k<LY;k++)
{
int off=k*pnodesize*mm;
for(int i=0;i<pnodesize;i++)
{
for(int j=0;j<mm;j++)
if(j<rus[i].size())
rudw[off+i*mm+j]=esigns[k][ruw[i][j]];
else
rudw[off+i*mm+j]=-1;
}
}
cudaMemcpy(dev_rudw,rudw,mm*LY*pnodesize*sizeof(int),cudaMemcpyHostToDevice);
}
void Bellmanor::updatS(vector<vector<Sot>>&stpair)
{
L[0]=0;
L[1]=LY1;
L[2]=LY2;
S[0]=stpair[0].size();
S[1]=stpair[1].size();
stps=stpair;
int count=0;
ncount=L[1]*S[0]+L[2]*S[1];
memset(d,1,ncount*nodenum*sizeof(int));
memset(p,-1,ncount*nodenum*sizeof(int));
for(int k=0;k<L[1];k++)
{
for(int j=0;j<stpair[0].size();j++)
{
d[count*nodenum+stpair[0][j].s]=0;
count++;
}
}
for(int k=0;k<L[2];k++)
{
for(int j=0;j<stpair[1].size();j++)
{
d[count*nodenum+stpair[1][j].s]=0;
count++;
}
}
Size[0]=pnodesize*L[1]*S[0];
Size[1]=pnodesize*L[2]*S[1];
cudaMemcpy(dev_d,d,ncount*nodenum*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p,p,ncount*nodenum*sizeof(int),cudaMemcpyHostToDevice);
}
void Bellmanor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,int _nodenum)
{
//cout<<"init bellmanor"<<endl;
nodenum=_nodenum;
pnodesize=nodenum/(NUT);
edges=ext.first;
esigns=ext.second;
stp=stpair;
W=WD+1;
//st=new int[edges.size()*LY];
//te=new int[edges.size()*LY];
d=new int[nodenum*LY*YE];
p=new int[nodenum*LY*YE];
w=new int[edges.size()*LY];
esignes=new int[edges.size()*LY];
vector<vector<int>>nein(pnodesize*LY,vector<int>());
neibn=nein;
vector<vector<int>>neie(pnodesize,vector<int>());
vector<vector<int>>rs(pnodesize,vector<int>());
vector<vector<int>>rw(pnodesize,vector<int>());
rus=rs;
ruw=rw;
for(int i=0;i<edges.size();i++)
{
int s=edges[i].s;
int t=edges[i].t;
rus[t].push_back(s);
ruw[t].push_back(i);
neibn[s].push_back(t);
neie[s].push_back(i);
}
mm=0;
for(int i=0;i<rus.size();i++)
if(rus[i].size()>mm)mm=rus[i].size();
rudu=new int[nodenum*mm*LY];
rudw=new int[nodenum*mm*LY];
rid=new int[nodenum*mm*LY];
for(int k=0;k<LY;k++)
{
int off=k*pnodesize*mm;
for(int i=0;i<pnodesize;i++)
{
for(int j=0;j<mm;j++)
if(j<rus[i].size())
rudu[off+i*mm+j]=rus[i][j];
else
rudu[off+i*mm+j]=INT_MAX;
for(int j=0;j<mm;j++)
if(j<rus[i].size())
{
rudw[off+i*mm+j]=esigns[k][ruw[i][j]];
rid[off+i*mm+j]=ruw[i][j];
}
else
{
rudw[off+i*mm+j]=-1;
rid[off+i*mm+j]=-1;
}
}
}
int count=0;
cudaMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int));
cudaMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int));
//cudaMalloc((void**)&dev_w,LY*edges.size()*sizeof(int));
//cudaMalloc((void**)&dev_m1,sizeof(int));
//cudaMalloc((void**)&dev_m2,sizeof(int));
cudaMalloc((void**)&dev_rudu,mm*LY*pnodesize*sizeof(int));
cudaMalloc((void**)&dev_rudw,mm*LY*pnodesize*sizeof(int));
cudaMalloc((void**)&dev_rid,mm*LY*pnodesize*sizeof(int));
//cudaMalloc((void**)&dev_ruid,mm*LY*nodenum*sizeof(int));
//cudaMemcpy(dev_te,te,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(dev_st,st,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(dev_w,w,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_rudu,rudu,mm*LY*pnodesize*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_rudw,rudw,mm*LY*pnodesize*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_rid,rid,mm*LY*pnodesize*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(dev_ruid,ruid,mm*LY*nodenum*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(dev_m1,m1,sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(dev_m2,m2,sizeof(int),cudaMemcpyHostToDevice);
};
Bellmanor::Bellmanor():L(PC+1,0),S(PC,0),NF(PC,0),Size(2,0)
{
};
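// bellmandu: one Bellman-Ford relaxation sweep for hop level K. Each thread owns a
// (layer, demand, node) triple, scans the node's padded incoming-edge row and, if a shorter
// path through the previous hop level exists, updates the distance d and stores the id of
// the relaxing edge in p.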
__global__ void bellmandu(int *rudu,int*rudw,int *rid,int *d,int*p,int K,int PN,int size,int sizeoff,int leveloff,int ye,int mm)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>=size)return;
int yep=ye*PN;
int lyy=i/(yep);
int yee=(i%yep)/PN;
int nn=i%PN;
int off=(lyy*yep+yee*PN+sizeoff)*NUT+K*PN;
int roff=(nn+(lyy+leveloff)*PN)*mm;
i=off+nn;
int dm=d[i];
int pm=-1;
for(int k=0;k<mm;k++)
if(rudu[roff+k]<INT_MAX)
{
int node=rudu[roff+k]+off-PN;
if(rudw[roff+k]<0)continue;
if(dm>d[node]+rudw[roff+k])
{
dm=d[node]+rudw[roff+k];
pm=rid[roff+k];
}
}
if(d[i]>dm)
{
d[i]=dm,p[i]=pm;
}
}
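// routalg: run WD relaxation sweeps (one per hop level), overlapping the two demand classes
// on separate CUDA streams, copy the distance and predecessor arrays back to the host, and
// for every (layer, source, terminal) keep the hop level with the smallest distance as a
// Rout entry.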
vector<vector<Rout>> Bellmanor::routalg(int s,int t,int bw)
{
//cout<<"inbellman"<<endl;
int kk=1;
time_t start,end;
start=clock();
cudaStream_t stream0;
cudaStreamCreate(&stream0);
cudaStream_t stream1;
cudaStreamCreate(&stream1);
for(int i=1;i<WD+1;i++)
{
bellmandu<<<Size[0]/512+1,512,0,stream0>>>(dev_rudu,dev_rudw,dev_rid,dev_d,dev_p,i,pnodesize,Size[0],0,0,S[0],mm);
bellmandu<<<Size[1]/512+1,512,0,stream1>>>(dev_rudu,dev_rudw,dev_rid,dev_d,dev_p,i,pnodesize,Size[1],Size[0],L[1],S[1],mm);
}
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream0);
cudaMemcpy(d,dev_d,ncount*nodenum*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(p,dev_p,ncount*nodenum*sizeof(int),cudaMemcpyDeviceToHost);
/*for(int i=0;i<ncount;i++)
{
cout<<"********************************************** "<<i<<endl;
for(int k=0;k<WD+1;k++)
{
for(int j=0;j<pnodesize;j++)
cout<<p[i*nodenum+k*pnodesize+j]<<" ";
cout<<endl;
}
}*/
end=clock();
vector<vector<Rout>>result(2,vector<Rout>());
vector<int>LL(3,0);
LL=L;
LL[2]+=LL[1];
int count=0;
for(int y=1;y<PC+1;y++)
for(int k=LL[y-1];k<LL[y];k++)
{
for(int l=0;l<stps[y-1].size();l++)
{
int offf=count*nodenum;
int s=stps[y-1][l].s;
vector<int>ters=stps[y-1][l].ters;
for(int i=0;i<ters.size();i++)
{
int id=stps[y-1][l].mmpid[ters[i]];
int hop=0;
int tt=ters[i];
int min=10000;
int prn=-1;
for(int i=1;i<W;i++)
{
if(d[offf+tt+i*pnodesize]<min)
{
min=d[offf+tt+i*pnodesize];
prn=offf+tt+i*pnodesize;
}
}
int offf=prn-tt;
int offer=offf;
if(prn<0)continue;
Rout S(s,tt,id,min,offf,k);
result[y-1].push_back(S);
}
count++;
}
}
//cout<<"GPU time is : "<<end-start<<endl;
return result;
};
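// The commented-out kernels and driver loop below appear to be an earlier edge-parallel
// variant of the relaxation; they are kept for reference only and are not used.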
/*
__global__ void bellmanhigh(int *st,int *te,int *d,int *has,int *w,int E,int N,int size,int *m,int round,int Leveloff,int numoff,int ye,int ly)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>size)return;
int eid=(i%(E*ly));
int eeid=eid+Leveloff;
int s=st[eeid],t=te[eeid],weight=w[eeid];
if(weight<0)return;
int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff;
//if(has[s+off]<round-1)return;
if(d[s+off]+weight<d[t+off])
{
d[t+off]=weight+d[s+off];
//has[t+off]=round;
*m=1;
}
}*/
/*__global__ void color(int *st,int *te,int *d,int *pre,int *has,int *w,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>size)return;
int eid=(i%(E*ly));
int eeid=eid+Leveloff;
int s=st[eeid],t=te[eeid],weight=w[eeid];
if(weight<0)return;
int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff;
//if(has[s+off]<round-1)return;
if(d[s+off]+weight==d[t+off])
pre[t+off]=s+off;
}*/
/*m1=1;
*m2=1;
int round=1;
cout<<"fuck wx!"<<endl;
int flag1=0,flag2=0;
int cc=0;
while(*m2==1||*m1==1)
{
*m2=0,*m1=0;
cudaMemcpyAsync(dev_m2,m2,sizeof(int),cudaMemcpyHostToDevice,stream1);
bellmanhigh<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[1],dev_m2,round,leveloff[1],nodeoff[1],S[1],L[1]);
cudaMemcpyAsync(dev_m1,m1,sizeof(int),cudaMemcpyHostToDevice,stream0);
bellmanhigh<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_has,dev_w,edges.size(),nodenum,size[0],dev_m2,round,leveloff[0],nodeoff[0],S[0],L[0]);
color<<<size[1]/1024+1,1024,0,stream1>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[1],round,leveloff[1],nodeoff[1],S[1],L[1]);
cudaMemcpyAsync(m2,dev_m2,sizeof(int),cudaMemcpyDeviceToHost,stream1);
color<<<size[0]/1024+1,1024,0,stream0>>>(dev_st,dev_te,dev_d,dev_p,dev_has,dev_w,edges.size(),nodenum,size[0],round,leveloff[0],nodeoff[0],S[0],L[0]);
cudaMemcpyAsync(m1,dev_m1,sizeof(int),cudaMemcpyDeviceToHost,stream0);
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream0);
}*/
|
d143fdf7e4c4533a8eb166adf6d8479c08efe16a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt)
{
char plain_password[] = "RG";
char *a = attempt;
char *p = plain_password;
while(*a == *p)
{
if(*a == '\0')
{
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
 The kernel uses nested loops so that every thread generates and tests each
 possible two-letter password against the hidden password. main() launches
 26x26 threads, but each thread still repeats the full search.
*****************************************************************************/
__global__ void kernel()
{
char i, j;
int threadID = threadIdx.x;
int blockID = blockIdx.x;
int tID = blockID * blockDim.x + threadID;
// prints a unique thread id.
printf("Thread id is %d\n", tID);
char password[3];
password[2] = '\0';
for(i='A'; i<='Z'; i++)
{
password[0] = i;
for(j='A'; j<='Z'; j++)
{
password[1] = j;
if(is_a_match(password))
{
printf("password found: %s\n, Thread Id is %d\n threadID" , password, tID);
}
else
{
//printf("tried: %s\n", password);
}
}
}
}
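/* A minimal sketch of how the 26x26 launch configured in main() could assign one
   two-letter candidate per thread instead of every thread repeating the whole
   nested-loop search. The kernel name and the block/thread-to-letter mapping are
   illustrative assumptions only; main() does not call this kernel. */
__global__ void kernel_one_candidate_per_thread()
{
  char password[3];
  password[0] = 'A' + blockIdx.x;  // one first letter per block (26 blocks)
  password[1] = 'A' + threadIdx.x; // one second letter per thread (26 threads per block)
  password[2] = '\0';
  if(is_a_match(password))
  {
    printf("password found: %s\n", password);
  }
}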
int main()
{
char arrayLetters[26] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'};
char *gpuLetters;
//cudamalloc
hipMalloc((void**) &gpuLetters, 26*sizeof(char));
hipMemcpy(gpuLetters, arrayLetters, 26*sizeof(char), hipMemcpyHostToDevice);
// launch the kernel with 26 blocks of 26 threads each;
hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, );
hipDeviceSynchronize();
//cudamemcpy
hipMemcpy(arrayLetters, gpuLetters, 26*sizeof(char), hipMemcpyDeviceToHost);
return 0;
}
| d143fdf7e4c4533a8eb166adf6d8479c08efe16a.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt)
{
char plain_password[] = "RG";
char *a = attempt;
char *p = plain_password;
while(*a == *p)
{
if(*a == '\0')
{
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
 The kernel uses nested loops so that every thread generates and tests each
 possible two-letter password against the hidden password. main() launches
 26x26 threads, but each thread still repeats the full search.
*****************************************************************************/
__global__ void kernel()
{
char i, j;
int threadID = threadIdx.x;
int blockID = blockIdx.x;
int tID = blockID * blockDim.x + threadID;
// prints a unique thread id.
printf("Thread id is %d\n", tID);
char password[3];
password[2] = '\0';
for(i='A'; i<='Z'; i++)
{
password[0] = i;
for(j='A'; j<='Z'; j++)
{
password[1] = j;
if(is_a_match(password))
{
printf("password found: %s\n, Thread Id is %d\n threadID" , password, tID);
}
else
{
//printf("tried: %s\n", password);
}
}
}
}
int main()
{
char arrayLetters[26] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'};
char *gpuLetters;
//cudamalloc
cudaMalloc((void**) &gpuLetters, 26*sizeof(char));
cudaMemcpy(gpuLetters, arrayLetters, 26*sizeof(char), cudaMemcpyHostToDevice);
// launch the kernel with 26 blocks of 26 threads each;
kernel <<<26, 26>>>();
cudaThreadSynchronize();
//cudamemcpy
cudaMemcpy(arrayLetters, gpuLetters, 26*sizeof(char), cudaMemcpyDeviceToHost);
return 0;
}
|
568ec33cb9eaa3a9c62a0d1b4ac44bb438ad8ef5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include "zone_map.h"
using namespace mgpu;
vector<void*> alloced_mem;
template<typename T>
struct distinct : public binary_function<T,T,T>
{
__host__ __device__ T operator()(const T &lhs, const T &rhs) const {
return lhs != rhs;
}
};
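// select(): a small stack machine over the parser's postfix op_type/op_value queues.
// Column names, numeric literals and intermediate device vectors are pushed on stacks;
// MUL/ADD/DIV/MINUS pop two operands (tracking decimal precision for integer columns);
// COUNT/SUM/MIN/MAX/AVG/DISTINCT/YEAR either reduce per group (via ModernGPU/thrust
// reduce-by-key when a->grp_count is set) or collapse to a single row. The trailing loop
// materialises the results as columns of the output CudaSet b.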
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a,
CudaSet* b, vector<thrust::device_vector<int_type> >& distinct_tmp, bool& one_liner)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
size_t res_size = 0;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<int_type> exe_nums1;
stack<unsigned int> exe_precision;
stack<unsigned int> exe_precision1;
bool ts;
stack<bool> exe_ts;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line;
unsigned int dist_processed = 0;
bool prep = 0;
one_line = 0;
thrust::device_ptr<bool> d_di(thrust::raw_pointer_cast(a->grp.data()));
std::auto_ptr<ReduceByKeyPreprocessData> ppData;
if (a->grp_count && (a->mRecCount != 0))
res_size = a->grp_count;
std::clock_t start1 = std::clock();
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
//cout << ss << endl;
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0 || ss.compare("DISTINCT") == 0 || ss.compare("YEAR") == 0) {
if(!prep && a->grp_count) {
mgpu::ReduceByKeyPreprocess<float_type>((int)a->mRecCount, thrust::raw_pointer_cast(d_di),
(bool*)0, head_flag_predicate<bool>(), (int*)0, (int*)0,
&ppData, *context);
prep = 1;
};
if(!a->grp_count && ss.compare("YEAR"))
one_line = 1;
if (ss.compare("YEAR") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::transform(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount, thrust::make_constant_iterator(10000), res, thrust::divides<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
exe_precision.push(a->decimal_zeroes[s1_val]);
};
if (ss.compare("DISTINCT") == 0) {
s1_val = exe_value.top();
exe_type.pop();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::copy(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount,
distinct_tmp[dist_processed].begin());
dist_processed++;
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(res_size);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 2) {
//will add a DISTINCT on strings if anyone needs it
cout << "DISTINCT on strings is not supported yet" << endl;
exit(0);
}
else {
cout << "DISTINCT on float is not supported yet" << endl;
exit(0);
};
}
else if (ss.compare("COUNT") == 0) {
s1 = exe_type.top();
if(s1.compare("VECTOR") != 0) { // non distinct
grp_type = "COUNT";
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<int_type> const_seq((int_type*)alloced_mem.back());
thrust::fill(const_seq, const_seq+a->mRecCount, (int_type)1);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(const_seq), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::device_free(const_seq);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
};
}
else
grp_type = "COUNTD";
exe_precision.push(0);
}
else if (ss.compare("SUM") == 0) {
/*if(op_case) {
cout << "found case " << endl;
op_case = 0;
while(!exe_type.empty())
{
cout << "CASE type " << exe_type.top() << endl;
exe_type.pop();
exit(0);
}
};
*/
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (a->grp_count) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, s3, (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di + a->mRecCount, source,
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
a->mRecCount = 1;
};
hipFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(source), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
hipFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> dest;
int_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> dest;
float_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
exe_precision.push(a->decimal_zeroes[s1_val]);
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
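// Convert the boolean group-boundary flags into non-decreasing segment ids with the
// scan below, so thrust::reduce_by_key with equal_to can produce one minimum per group.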
thrust::device_ptr<unsigned int> d_di1((unsigned int*)alloced_mem.back());
thrust::copy(d_di, d_di+a->mRecCount,d_di1);
thrust::exclusive_scan(d_di1, d_di1+a->mRecCount, d_di1);
thrust::equal_to<unsigned int> binary_pred;
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
// mgpu::minimum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_int[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
// mgpu::minimum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_float[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("MAX") == 0) {
grp_type = "MAX";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::maximum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::maximum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), a->d_columns_float[s1_val].begin(),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(), thrust::maximum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
};
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
ts = a->ts_cols[op_value.front()];
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1, p2, p1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0, p2, p1));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
auto p1 = a->decimal_zeroes[s1_val];
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,t1,ss,0,p2,p1));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,0,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,1,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,1, p1, p2));
//hipFree(s3);
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,0, p2, p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0));
alloced_mem.push_back(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("FLOAT") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3, s4,ss,0,p1,p2));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
}
}
} //
else {
// here we need to save what is where
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_precision1.push(a->decimal_zeroes[exe_value.top()]);
exe_value1.push(exe_value.top());
exe_ts.push(ts);
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
for(unsigned int j=0; j < colCount; j++) {
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[col_val.top()] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[col_val.top()] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[col_val.top()] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[col_val.top()] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[col_val.top()] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[col_val.top()] = 5;
else if ((grp_type1.top()).compare("COUNTD") == 0 ) {
b->grp_type[col_val.top()] = 6;
};
if(col_type.top() == 0) {
// create a vector
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::copy_if(thrust::make_constant_iterator((int)exe_nums1.top()), thrust::make_constant_iterator((int)exe_nums1.top()) + a->mRecCount, d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else {
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
b->addDeviceColumn(thrust::raw_pointer_cast(s), col_val.top(), a->mRecCount);
}
exe_nums1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 1) {
if(a->type[exe_value1.top()] == 0 || a->type[exe_value1.top()] == 2) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_int[exe_value1.top()].begin(),a->d_columns_int[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_int[exe_value1.top()].data()) , col_val.top(), a->mRecCount);
if(a->type[exe_value1.top()] == 0) {
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
b->ts_cols[col_val.top()] = exe_ts.top();
};
if(a->type[exe_value1.top()] == 2 || (a->type[exe_value1.top()] == 0 && a->string_map.find(exe_value1.top()) != a->string_map.end())) {
b->string_map[col_val.top()] = a->string_map[exe_value1.top()];
};
exe_precision1.pop();
exe_ts.pop();
}
else if(a->type[exe_value1.top()] == 1) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_float[exe_value1.top()].begin(), a->d_columns_float[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size, a->decimal[exe_value1.top()]);
thrust::device_free(count_diff);
}
else {
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_float[exe_value1.top()].data()), col_val.top(), a->mRecCount, a->decimal[exe_value1.top()]);
};
}
exe_value1.pop();
}
else if(col_type.top() == 2) { // int
if (a->grp_count)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), res_size);
else {
if(!one_line)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), a->mRecCount);
else
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), 1);
};
hipFree(exe_vectors1.top());
exe_vectors1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 3) { //float
if (a->grp_count) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), res_size, 1);
}
else {
if(!one_line) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), a->mRecCount, 1);
}
else {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), 1, 1);
};
};
hipFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if (!a->grp_count) {
if(!one_line)
b->mRecCount = a->mRecCount;
else
b->mRecCount = 1;
one_liner = one_line;
}
else {
b->mRecCount = res_size;
one_liner = 0;
};
}
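/* A minimal, self-contained sketch of the grouped-reduction pattern used above:
   per-row group-boundary flags are turned into segment ids with a scan, and
   reduce_by_key then aggregates one value per segment. The helper name, the flag
   convention and the use of plain thrust here are illustrative assumptions. */
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/iterator/discard_iterator.h>
static void grouped_sum_sketch()
{
    int h_vals[5] = {1, 2, 3, 4, 5};
    int h_flags[5] = {1, 0, 1, 0, 0}; // a 1 marks the first row of a group
    thrust::device_vector<int> vals(h_vals, h_vals + 5);
    thrust::device_vector<int> flags(h_flags, h_flags + 5);
    // inclusive scan of the head flags gives a segment id per row: 1 1 2 2 2
    thrust::device_vector<int> segs(5);
    thrust::inclusive_scan(flags.begin(), flags.end(), segs.begin());
    // one sum per segment: {1+2, 3+4+5} = {3, 12}
    thrust::device_vector<int> sums(2);
    thrust::reduce_by_key(segs.begin(), segs.end(), vals.begin(),
                          thrust::make_discard_iterator(), sums.begin());
}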
| 568ec33cb9eaa3a9c62a0d1b4ac44bb438ad8ef5.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include "zone_map.h"
using namespace mgpu;
vector<void*> alloced_mem;
template<typename T>
struct distinct : public binary_function<T,T,T>
{
__host__ __device__ T operator()(const T &lhs, const T &rhs) const {
return lhs != rhs;
}
};
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a,
CudaSet* b, vector<thrust::device_vector<int_type> >& distinct_tmp, bool& one_liner)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
size_t res_size = 0;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<int_type> exe_nums1;
stack<unsigned int> exe_precision;
stack<unsigned int> exe_precision1;
bool ts;
stack<bool> exe_ts;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line;
unsigned int dist_processed = 0;
bool prep = 0;
one_line = 0;
thrust::device_ptr<bool> d_di(thrust::raw_pointer_cast(a->grp.data()));
std::auto_ptr<ReduceByKeyPreprocessData> ppData;
if (a->grp_count && (a->mRecCount != 0))
res_size = a->grp_count;
std::clock_t start1 = std::clock();
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
//cout << ss << endl;
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0 || ss.compare("DISTINCT") == 0 || ss.compare("YEAR") == 0) {
if(!prep && a->grp_count) {
mgpu::ReduceByKeyPreprocess<float_type>((int)a->mRecCount, thrust::raw_pointer_cast(d_di),
(bool*)0, head_flag_predicate<bool>(), (int*)0, (int*)0,
&ppData, *context);
prep = 1;
};
if(!a->grp_count && ss.compare("YEAR"))
one_line = 1;
if (ss.compare("YEAR") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::transform(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount, thrust::make_constant_iterator(10000), res, thrust::divides<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
exe_precision.push(a->decimal_zeroes[s1_val]);
};
if (ss.compare("DISTINCT") == 0) {
s1_val = exe_value.top();
exe_type.pop();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::copy(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount,
distinct_tmp[dist_processed].begin());
dist_processed++;
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(res_size);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 2) {
//will add a DISTINCT on strings if anyone needs it
cout << "DISTINCT on strings is not supported yet" << endl;
exit(0);
}
else {
cout << "DISTINCT on float is not supported yet" << endl;
exit(0);
};
}
else if (ss.compare("COUNT") == 0) {
s1 = exe_type.top();
if(s1.compare("VECTOR") != 0) { // non distinct
grp_type = "COUNT";
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<int_type> const_seq((int_type*)alloced_mem.back());
thrust::fill(const_seq, const_seq+a->mRecCount, (int_type)1);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(const_seq), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::device_free(const_seq);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
};
}
else
grp_type = "COUNTD";
exe_precision.push(0);
}
else if (ss.compare("SUM") == 0) {
/*if(op_case) {
cout << "found case " << endl;
op_case = 0;
while(!exe_type.empty())
{
cout << "CASE type " << exe_type.top() << endl;
exe_type.pop();
exit(0);
}
};
*/
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (a->grp_count) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, s3, (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di + a->mRecCount, source,
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
a->mRecCount = 1;
};
cudaFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(source), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
cudaFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> dest;
int_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> dest;
float_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
exe_precision.push(a->decimal_zeroes[s1_val]);
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<unsigned int> d_di1((unsigned int*)alloced_mem.back());
thrust::copy(d_di, d_di+a->mRecCount,d_di1);
thrust::exclusive_scan(d_di1, d_di1+a->mRecCount, d_di1);
thrust::equal_to<unsigned int> binary_pred;
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
// mgpu::minimum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_int[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
// mgpu::minimum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_float[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("MAX") == 0) {
grp_type = "MAX";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::maximum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::maximum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), a->d_columns_float[s1_val].begin(),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(), thrust::maximum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
};
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
ts = a->ts_cols[op_value.front()];
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1, p2, p1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0, p2, p1));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
auto p1 = a->decimal_zeroes[s1_val];
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,t1,ss,0,p2,p1));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,0,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,1,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,1, p1, p2));
//cudaFree(s3);
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,0, p2, p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0));
alloced_mem.push_back(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("FLOAT") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3, s4,ss,0,p1,p2));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
}
}
} //
else {
// here we need to save what is where
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_precision1.push(a->decimal_zeroes[exe_value.top()]);
exe_value1.push(exe_value.top());
exe_ts.push(ts);
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
for(unsigned int j=0; j < colCount; j++) {
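// map the aggregate function name to the numeric code stored in grp_type:
// COUNT=0, AVG=1, SUM=2, NULL=3, MIN=4, MAX=5, COUNT DISTINCT=6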
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[col_val.top()] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[col_val.top()] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[col_val.top()] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[col_val.top()] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[col_val.top()] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[col_val.top()] = 5;
else if ((grp_type1.top()).compare("COUNTD") == 0 ) {
b->grp_type[col_val.top()] = 6;
};
if(col_type.top() == 0) {
// create a vector
if (a->grp_count) {
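// grouping is active: compact the constant column, keeping only the rows flagged in d_di (res_size entries)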
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::copy_if(thrust::make_constant_iterator((int)exe_nums1.top()), thrust::make_constant_iterator((int)exe_nums1.top()) + a->mRecCount, d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else {
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
b->addDeviceColumn(thrust::raw_pointer_cast(s), col_val.top(), a->mRecCount);
}
exe_nums1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 1) {
if(a->type[exe_value1.top()] == 0 || a->type[exe_value1.top()] == 2) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_int[exe_value1.top()].begin(),a->d_columns_int[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_int[exe_value1.top()].data()) , col_val.top(), a->mRecCount);
if(a->type[exe_value1.top()] == 0) {
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
b->ts_cols[col_val.top()] = exe_ts.top();
};
if(a->type[exe_value1.top()] == 2 || (a->type[exe_value1.top()] == 0 && a->string_map.find(exe_value1.top()) != a->string_map.end())) {
b->string_map[col_val.top()] = a->string_map[exe_value1.top()];
};
exe_precision1.pop();
exe_ts.pop();
}
else if(a->type[exe_value1.top()] == 1) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_float[exe_value1.top()].begin(), a->d_columns_float[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size, a->decimal[exe_value1.top()]);
thrust::device_free(count_diff);
}
else {
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_float[exe_value1.top()].data()), col_val.top(), a->mRecCount, a->decimal[exe_value1.top()]);
};
}
exe_value1.pop();
}
else if(col_type.top() == 2) { // int
if (a->grp_count)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), res_size);
else {
if(!one_line)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), a->mRecCount);
else
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), 1);
};
cudaFree(exe_vectors1.top());
exe_vectors1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 3) { //float
if (a->grp_count) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), res_size, 1);
}
else {
if(!one_line) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), a->mRecCount, 1);
}
else {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), 1, 1);
};
};
cudaFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if (!a->grp_count) {
if(!one_line)
b->mRecCount = a->mRecCount;
else
b->mRecCount = 1;
one_liner = one_line;
}
else {
b->mRecCount = res_size;
one_liner = 0;
};
}
|
fe7fe3276215636b024f4bb6612332b07cfa40fe.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dgemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv_mgpu_core.cuh"
#include "gemv_mgpu_offset_core.cuh"
#include "kblas_defs.h"
#if(TARGET_SM >= 30)
#define dgemvn_mgpu_bs (64)
#define dgemvn_mgpu_ty (4)
//#define dgemvn_mgpu_by (2)
#define dgemvt_mgpu_bs (64)
#define dgemvt_mgpu_ty (4)
//#define dgemvt_mgpu_by (2)
#else
#define dgemvn_mgpu_bs (64)
#define dgemvn_mgpu_ty (8)
//#define dgemvn_mgpu_by (1)
#define dgemvt_mgpu_bs (64)
#define dgemvt_mgpu_ty (8)
//#define dgemvt_mgpu_by (1)
#endif
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, hipStream_t stream);
extern "C"
int kblas_dgemv_mgpu_driver( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy, int gpu_gid, int ngpus,
hipStream_t stream = 0)
{
const double d_zero = 0.0;
if(trans == 'n' || trans == 'N')
{
//******** config parameters
const int thread_x = dgemvn_mgpu_bs;
const int thread_y = dgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_n = 1 * ngpus;
//**************************
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(rows, beta, dY, incy);
if(gpu_gid == 0)kblas_dscal_async(rows, beta, dY, incy, stream);
else kblas_dscal_async(rows, d_zero, dY, incy, stream);
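// block-cyclic split of the dgemvn_mgpu_bs-wide column blocks across the GPUs:
// cols_ is the number of columns this GPU actually owns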
int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus );
if(gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs;
if(gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs;
int mod_r = rows % dgemvn_mgpu_bs;
int mod_c = cols_ % dgemvn_mgpu_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/dgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_special<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = rows/dgemvn_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for columns only
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus);
}
else
{
// generic case for rows and cols
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
//printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//************ config parameters
const int thread_x = dgemvt_mgpu_bs;
const int thread_y = dgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_t = 1 * ngpus;
//******************************
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(cols, beta, dY, incy);
if(gpu_gid == 0)kblas_dscal_async(cols, beta, dY, incy, stream);
else kblas_dscal_async(cols, d_zero, dY, incy, stream);
int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus );
if(gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs;
if(gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs;
int mod_r = rows % dgemvt_mgpu_bs;
int mod_c = cols_ % dgemvt_mgpu_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols_/dgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_special<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj);
}
else
{
// mod_r != 0
int blocks = cols_/dgemvt_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj);
}
}
else // mod_c != 0
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_mgpu_driver_offset( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy, int gpu_gid, int ngpus,
int offset_r, int offset_c,
hipStream_t stream = 0)
{
const double d_zero = 0.0;
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = dgemvn_mgpu_bs;
const int thread_y = dgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = 2 * ngpus;
//*************************
/** offset necessary calculation **/
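// split each offset into whole skipped blocks plus an in-block remainder (offset_r_/offset_c_)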
int offset_r_ = offset_r % dgemvn_mgpu_bs;
int offset_c_ = offset_c % dgemvn_mgpu_bs;
int total_blocks_skipped_r = offset_r / dgemvn_mgpu_bs;
int total_blocks_skipped_c = offset_c / dgemvn_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
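// renumber the GPUs so that the GPU owning the first non-skipped column block becomes GPU 0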
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvn_mgpu_bs * lda;
dA += total_blocks_skipped_r * dgemvn_mgpu_bs;
dX += total_blocks_skipped_c * dgemvn_mgpu_bs * incx;
dY += my_skipped_blocks_r * dgemvn_mgpu_bs * incy;
rows -= total_blocks_skipped_r * dgemvn_mgpu_bs;
cols -= total_blocks_skipped_c * dgemvn_mgpu_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvn_mgpu_bs) + ((cols%dgemvn_mgpu_bs) != 0);
// scaling with beta
if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
else kblas_dscal_async(rows-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream);
int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs;
if(new_gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs;
int mod_r = rows % dgemvn_mgpu_bs;
int mod_c = cols_ % dgemvn_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/dgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_special_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread>)
, dim3(dimGrid), dim3(dimBlock), 0, stream,
rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = dgemvt_mgpu_bs;
const int thread_y = dgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = 2 * ngpus;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvt_mgpu_bs;
int offset_c_ = offset_c % dgemvt_mgpu_bs;
int total_blocks_skipped_r = offset_r / dgemvt_mgpu_bs;
int total_blocks_skipped_c = offset_c / dgemvt_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvt_mgpu_bs * lda;
dA += my_skipped_blocks_r * dgemvt_mgpu_bs;
dX += total_blocks_skipped_r * dgemvt_mgpu_bs * incx;
dY += total_blocks_skipped_c * dgemvt_mgpu_bs * incy;
rows -= total_blocks_skipped_r * dgemvt_mgpu_bs;
cols -= total_blocks_skipped_c * dgemvt_mgpu_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvt_mgpu_bs) + ((cols%dgemvt_mgpu_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
else kblas_dscal_async(cols-offset_c_, d_zero, dY+(offset_c_*incy), incy, stream);
int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs;
if(new_gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs;
int mod_r = rows % dgemvt_mgpu_bs;
int mod_c = cols_ % dgemvt_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/dgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_special_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
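// gpu_last is the GPU that owns the last (possibly partial) column stripe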
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_dgemv_mgpu( char trans, int rows, int cols,
double alpha, double **dA, int lda,
double **dX, int incx,
double beta, double **dY, int incy,
int ngpus,
int offset_r, int offset_c)
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
hipDeviceSynchronize();
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_mgpu_async( char trans, int rows, int cols,
double alpha, double **dA, int lda,
double **dX, int incx,
double beta, double **dY, int incy,
int ngpus,
int offset_r, int offset_c,
hipStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c, stream[i][0]);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_dgemv_mgpu_bs(char trans)
{
if(trans == 'n' || trans == 'N')
return dgemvn_mgpu_bs;
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
return dgemvt_mgpu_bs;
else
{printf("Error .. input %c is not supported for gemv \n", trans); return -1;}
}
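/*
 * Usage sketch (illustrative only): how a caller might drive the multi-GPU DGEMV,
 * assuming the per-GPU copies dA[i], dX[i], dY[i] and the gpu_lid/gpu_gid mapping
 * have already been set up elsewhere; the sizes and names below are placeholders.
 *
 *   int ngpus = 2, lda = rows;
 *   double **dA, **dX, **dY;   // one device pointer per GPU, allocated/filled elsewhere
 *   // y = alpha * A * x + beta * y over the full matrix (no row/column offset)
 *   kblas_dgemv_mgpu('N', rows, cols,
 *                    alpha, dA, lda,
 *                    dX, 1,
 *                    beta, dY, 1,
 *                    ngpus, 0, 0);
 */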
| fe7fe3276215636b024f4bb6612332b07cfa40fe.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dgemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv_mgpu_core.cuh"
#include "gemv_mgpu_offset_core.cuh"
#include "kblas_defs.h"
#if(TARGET_SM >= 30)
#define dgemvn_mgpu_bs (64)
#define dgemvn_mgpu_ty (4)
//#define dgemvn_mgpu_by (2)
#define dgemvt_mgpu_bs (64)
#define dgemvt_mgpu_ty (4)
//#define dgemvt_mgpu_by (2)
#else
#define dgemvn_mgpu_bs (64)
#define dgemvn_mgpu_ty (8)
//#define dgemvn_mgpu_by (1)
#define dgemvt_mgpu_bs (64)
#define dgemvt_mgpu_ty (8)
//#define dgemvt_mgpu_by (1)
#endif
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, cudaStream_t stream);
extern "C"
int kblas_dgemv_mgpu_driver( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy, int gpu_gid, int ngpus,
cudaStream_t stream = 0)
{
const double d_zero = 0.0;
if(trans == 'n' || trans == 'N')
{
//******** config parameters
const int thread_x = dgemvn_mgpu_bs;
const int thread_y = dgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_n = 1 * ngpus;
//**************************
// scaling with beta
//if(gpu_gid == 0)cublasDscal(rows, beta, dY, incy);
if(gpu_gid == 0)kblas_dscal_async(rows, beta, dY, incy, stream);
else kblas_dscal_async(rows, d_zero, dY, incy, stream);
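// block-cyclic split of the dgemvn_mgpu_bs-wide column blocks across the GPUs:
// cols_ is the number of columns this GPU actually owns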
int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus );
if(gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs;
if(gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs;
int mod_r = rows % dgemvn_mgpu_bs;
int mod_c = cols_ % dgemvn_mgpu_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/dgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_special<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = rows/dgemvn_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for columns only
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus);
}
else
{
// generic case for rows and cols
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
//printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//************ config parameters
const int thread_x = dgemvt_mgpu_bs;
const int thread_y = dgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_t = 1 * ngpus;
//******************************
// scaling with beta
//if(gpu_gid == 0)cublasDscal(cols, beta, dY, incy);
if(gpu_gid == 0)kblas_dscal_async(cols, beta, dY, incy, stream);
else kblas_dscal_async(cols, d_zero, dY, incy, stream);
int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus );
if(gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs;
if(gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs;
int mod_r = rows % dgemvt_mgpu_bs;
int mod_c = cols_ % dgemvt_mgpu_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols_/dgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_special<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj);
}
else
{
// mod_r != 0
int blocks = cols_/dgemvt_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj);
}
}
else // mod_c != 0
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 1: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 2: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 3: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 4: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 5: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 6: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 7: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 8: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 9: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 10: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 11: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 12: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 13: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 14: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 15: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_mgpu_driver_offset( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy, int gpu_gid, int ngpus,
int offset_r, int offset_c,
cudaStream_t stream = 0)
{
const double d_zero = 0.0;
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = dgemvn_mgpu_bs;
const int thread_y = dgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = 2 * ngpus;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvn_mgpu_bs;
int offset_c_ = offset_c % dgemvn_mgpu_bs;
int total_blocks_skipped_r = offset_r / dgemvn_mgpu_bs;
int total_blocks_skipped_c = offset_c / dgemvn_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvn_mgpu_bs * lda;
dA += total_blocks_skipped_r * dgemvn_mgpu_bs;
dX += total_blocks_skipped_c * dgemvn_mgpu_bs * incx;
dY += my_skipped_blocks_r * dgemvn_mgpu_bs * incy;
rows -= total_blocks_skipped_r * dgemvn_mgpu_bs;
cols -= total_blocks_skipped_c * dgemvn_mgpu_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvn_mgpu_bs) + ((cols%dgemvn_mgpu_bs) != 0);
// scaling with beta
if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
else kblas_dscal_async(rows-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream);
int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs;
if(new_gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs;
int mod_r = rows % dgemvn_mgpu_bs;
int mod_c = cols_ % dgemvn_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/dgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_special_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread>
<<<dimGrid, dimBlock, 0, stream>>>
(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 1: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 2: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 3: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 4: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 5: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 6: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 7: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 8: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 9: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 10: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 11: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 12: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 13: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 14: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 15: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = dgemvt_mgpu_bs;
const int thread_y = dgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = 2 * ngpus;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvt_mgpu_bs;
int offset_c_ = offset_c % dgemvt_mgpu_bs;
int total_blocks_skipped_r = offset_r / dgemvt_mgpu_bs;
int total_blocks_skipped_c = offset_c / dgemvt_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvt_mgpu_bs * lda;
dA += my_skipped_blocks_r * dgemvt_mgpu_bs;
dX += total_blocks_skipped_r * dgemvt_mgpu_bs * incx;
dY += total_blocks_skipped_c * dgemvt_mgpu_bs * incy;
rows -= total_blocks_skipped_r * dgemvt_mgpu_bs;
cols -= total_blocks_skipped_c * dgemvt_mgpu_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvt_mgpu_bs) + ((cols%dgemvt_mgpu_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)cublasDscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
else kblas_dscal_async(cols-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream);
int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs;
if(new_gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs;
int mod_r = rows % dgemvt_mgpu_bs;
int mod_c = cols_ % dgemvt_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/dgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_special_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 1: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 2: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 3: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 4: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 5: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 6: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 7: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 8: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 9: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 10: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 11: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 12: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 13: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 14: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 15: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_dgemv_mgpu( char trans, int rows, int cols,
double alpha, double **dA, int lda,
double **dX, int incx,
double beta, double **dY, int incy,
int ngpus,
int offset_r, int offset_c)
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
cudaDeviceSynchronize();
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_mgpu_async( char trans, int rows, int cols,
double alpha, double **dA, int lda,
double **dX, int incx,
double beta, double **dY, int incy,
int ngpus,
int offset_r, int offset_c,
cudaStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_dgemv_mgpu_bs(char trans)
{
if(trans == 'n' || trans == 'N')
return dgemvn_mgpu_bs;
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
return dgemvt_mgpu_bs;
else
{printf("Error .. input %c is not supported for gemv \n", trans); return -1;}
}
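/*************************************************************************************/
/*
   Illustrative usage sketch (not part of the library): how a caller might invoke
   kblas_dgemv_mgpu for y = alpha*A*x + beta*y across ngpus devices. The per-GPU
   pointers dA[i], dX[i], dY[i] are assumed to already hold this library's
   block-cyclic distribution of A, x and y; the allocation/distribution helpers
   and the gpu_lid/gpu_gid setup are not shown here.

	double **dA, **dX, **dY;             // one device pointer per GPU (assumed already set up)
	int rows = 8192, cols = 8192, lda = rows, ngpus = 2;
	double alpha = 1.0, beta = 0.0;
	kblas_dgemv_mgpu('N', rows, cols, alpha, dA, lda, dX, 1, beta, dY, 1,
	                 ngpus, 0, 0);       // (offset_r, offset_c) = (0, 0)
*/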
|
c720ca84927897e8b3bc2ff5183b0190a7e1ea92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ void HSV2RGB(float h, float s, float v, float &r, float &g, float &b) {
if(h < 0) {
r=v;
g=v;
b=v;
return;
}
h *= .0166666666666667; // convert from 360 to 0-6;
int i = (int) floor(h);
float f = h - i;
f = (!(i&1)) ? 1-f : f; // if even
float m = v * (1-s);
float n = v * (1-s * f);
switch(i) {
case 6:
case 0:
r = v;
g= n;
b = m;
return;
case 1:
r = n;
g= v;
b = m;
return;
case 2:
r = m;
g= v;
b = n;
return;
case 3:
r = m;
g= n;
b = v;
return;
case 4:
r = n;
g= m;
b = v;
return;
case 5:
r = v;
g= n;
b = m;
return;
}
}
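// Worked example of the mapping above (for clarity): h = 120, s = 1, v = 1 gives
// h/60 = 2, so i = 2 and, because i is even, f = 1 - 0 = 1; then m = 0, n = 0 and
// case 2 returns (r, g, b) = (0, 1, 0), i.e. pure green, as expected.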
__global__ void gpu_calcColor_kernel(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
bool validPoint = ((i > 0) && (i < pointCnt))? true : false;
i *=3;
float z = validPoint ? pixels[i+2] : 0.0;
	if(z != 0.0) { // only equal to 0.0 if set as invalid in cloud constructor
float distZ = z - minZ;
float percent = distZ/diffZ;
percent = percent<=0.0 ? 0.01 : percent;
percent = percent>=1.0 ? 1.0 : percent;
float r;
float g;
float b;
if(hsv) {
if(percent == 0.0) {
HSV2RGB(minC1 , minC2, minC3 , r,g,b);
} else {
HSV2RGB(minC1 + percent * diffC1, minC2 + percent * diffC2, minC3 + percent * diffC3, r,g,b);
}
} else {
r= minC1 + percent * diffC1;
g= minC2 + percent * diffC2;
b= minC3 + percent * diffC3;
}
if(quads) {
i*=4;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
} else {
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
}
}
}
extern "C" void gpu_calcColor(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors)
{
	int threadsPerBlock = 256;
	int blocks = (int) ceilf(pointCnt/(float) threadsPerBlock);
	hipLaunchKernelGGL(( gpu_calcColor_kernel) , dim3(blocks),dim3(threadsPerBlock), 0, 0, pointCnt, pixels, minZ, diffZ, minC1, minC2, minC3, diffC1, diffC2, diffC3, hsv, quads, colors);
};
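/*
   Illustrative host-side usage sketch (assumed names, not part of this file).
   pixels and colors must be device buffers: pixels holds pointCnt xyz triples and
   colors holds 3 floats per point (or 12 when quads == true).

	float *d_pixels, *d_colors;          // assumed to be allocated and filled on the GPU
	gpu_calcColor(pointCnt, d_pixels,
	              minZ, maxZ - minZ,     // z range used for normalization
	              0.f, 1.f, 1.f,         // minimum HSV color (assumed)
	              240.f, 0.f, 0.f,       // HSV span added at 100% depth (assumed)
	              true,                  // treat the color parameters as HSV
	              false,                 // one color per point rather than per quad corner
	              d_colors);
*/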
| c720ca84927897e8b3bc2ff5183b0190a7e1ea92.cu |
__device__ void HSV2RGB(float h, float s, float v, float &r, float &g, float &b) {
if(h < 0) {
r=v;
g=v;
b=v;
return;
}
h *= .0166666666666667; // convert from 360 to 0-6;
int i = (int) floor(h);
float f = h - i;
f = (!(i&1)) ? 1-f : f; // if even
float m = v * (1-s);
float n = v * (1-s * f);
switch(i) {
case 6:
case 0:
r = v;
g= n;
b = m;
return;
case 1:
r = n;
g= v;
b = m;
return;
case 2:
r = m;
g= v;
b = n;
return;
case 3:
r = m;
g= n;
b = v;
return;
case 4:
r = n;
g= m;
b = v;
return;
case 5:
r = v;
g= n;
b = m;
return;
}
}
__global__ void gpu_calcColor_kernel(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
bool validPoint = ((i > 0) && (i < pointCnt))? true : false;
i *=3;
float z = validPoint ? pixels[i+2] : 0.0;
	if(z != 0.0) { // only equal to 0.0 if set as invalid in cloud constructor
float distZ = z - minZ;
float percent = distZ/diffZ;
percent = percent<=0.0 ? 0.01 : percent;
percent = percent>=1.0 ? 1.0 : percent;
float r;
float g;
float b;
if(hsv) {
if(percent == 0.0) {
HSV2RGB(minC1 , minC2, minC3 , r,g,b);
} else {
HSV2RGB(minC1 + percent * diffC1, minC2 + percent * diffC2, minC3 + percent * diffC3, r,g,b);
}
} else {
r= minC1 + percent * diffC1;
g= minC2 + percent * diffC2;
b= minC3 + percent * diffC3;
}
if(quads) {
i*=4;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
} else {
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
}
}
}
extern "C" void gpu_calcColor(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors)
{
	int threadsPerBlock = 256;
	int blocks = (int) ceilf(pointCnt/(float) threadsPerBlock);
	gpu_calcColor_kernel <<<blocks,threadsPerBlock>>> (pointCnt, pixels, minZ, diffZ, minC1, minC2, minC3, diffC1, diffC2, diffC3, hsv, quads, colors);
};
|
ea81ffcea55e7a55b4543c88c7fe3f61aed1af36.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include "cuda_scale_image.cuh"
#include "cuda_includes.h"
#include "common_structs.cuh"
#include "cuda_bilinear.cuh"
#include "cuda_kernels.cuh"
#include "cuda_kernel_sobel.cuh"
#include "cuda_nearest_neighbor.cuh"
#include "cuda_scale_pixel_precalculation.cuh"
namespace cuda_seq
{
void scale_and_apply_sobel(png_user_struct* result_image, png_user_struct* source_image, pixel_precalculation* d_x_pixel_precalculation_ptr,
pixel_precalculation* d_y_pixel_precalculation_ptr, kernel_mode kernel_mode);
void scale_image_apply_sobel(png_user_struct* source_image, png_user_struct* result_image, kernel_mode kernel_mode)
{
const auto old_height = source_image->image_info.height;
const auto new_height = result_image->image_info.height;
const auto old_width = source_image->image_info.width;
const auto new_width = result_image->image_info.width;
pixel_precalculation_memory precalculation_xy;
create_pixel_precalculation(&precalculation_xy, old_width, new_width, old_height, new_height);
scale_and_apply_sobel(result_image, source_image, precalculation_xy.d_x, precalculation_xy.d_y, kernel_mode);
hipFree(precalculation_xy.allocated_gpu_memory);
}
void scale_and_apply_sobel(png_user_struct* result_image, png_user_struct* source_image, pixel_precalculation* d_x_pixel_precalculation_ptr,
pixel_precalculation* d_y_pixel_precalculation_ptr, kernel_mode kernel_mode)
{
auto source_width = source_image->image_info.width;
auto source_height = source_image->image_info.height;
auto new_height = result_image->image_info.height;
auto new_width = result_image->image_info.width;
auto dimensions_size_in_bytes = sizeof(dimensions_info);
auto source_png_size_in_bytes = sizeof(png_byte) * source_width * source_height;
auto result_image_size_in_bytes = sizeof(png_byte) * new_width * new_height;
auto sobel_image_size_in_bytes = result_image_size_in_bytes;
auto needed_memory_in_bytes = dimensions_size_in_bytes + source_png_size_in_bytes + result_image_size_in_bytes + sobel_image_size_in_bytes;
_int8* allocated_memory_on_gpu_p;
hipMalloc(reinterpret_cast<void**>(&allocated_memory_on_gpu_p), needed_memory_in_bytes);
d_scale_params d_scale_params;
d_sobel_params d_sobel_params;
d_scale_params.x_precalculation_p = d_x_pixel_precalculation_ptr;
d_scale_params.y_precalculation_p = d_y_pixel_precalculation_ptr;
d_scale_params.dimensions_info_p = reinterpret_cast<dimensions_info*>(allocated_memory_on_gpu_p);
d_scale_params.source_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes);
d_scale_params.result_image_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes + source_png_size_in_bytes);
d_sobel_params.source_bytes_sequential_p = d_scale_params.result_image_bytes_sequential_p;
d_sobel_params.result_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes + source_png_size_in_bytes + result_image_size_in_bytes);
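		// Layout of the single device allocation set up above (for clarity):
		//   [ dimensions_info | source image bytes | scaled image bytes | sobel output bytes ]
		// dimensions_info_p, source_bytes_sequential_p, result_image_bytes_sequential_p and
		// result_bytes_sequential_p are simply successive offsets into that one allocation.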
		//the row-pointer (pp) array holding the source image is flattened into one contiguous buffer so it can be allocated on and copied to the GPU in a single operation
png_bytep png_source_bytes_p = png_util_create_flat_bytes_p_from_row_pp(source_image->png_rows, source_width, source_height, source_png_size_in_bytes);
hipMemcpy(d_scale_params.source_bytes_sequential_p, png_source_bytes_p, source_png_size_in_bytes, hipMemcpyHostToDevice);
dimensions_info dimensions_inf;
dimensions_inf.result_image_width = new_width;
dimensions_inf.result_image_height = new_height;
dimensions_inf.source_image_width = source_width;
hipMemcpy(d_scale_params.dimensions_info_p, &dimensions_inf, sizeof(dimensions_info), hipMemcpyHostToDevice);
d_sobel_params.dimensions_inf_p = d_scale_params.dimensions_info_p;
dim3 block_size(32, 32);
unsigned blocks_in_x_direction;
unsigned blocks_in_y_direction;
dim3 grid_size;
unsigned y_offset = new_height / 2;
switch (kernel_mode) {
case kernel_mode::bilinear_nn:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = ((new_height / 2) + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
global::apply_bilinear_filter << <grid_size, block_size>> > (d_scale_params, 0, y_offset);
global::apply_nearest_neighbor << <grid_size, block_size>> > (d_scale_params, y_offset, new_height);
hipMemcpy(result_image->png_rows[0], d_scale_params.result_image_bytes_sequential_p, result_image_size_in_bytes, hipMemcpyDeviceToHost);
break;
case kernel_mode::bilinear_nn_sobel:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = ((new_height / 2) + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
global::apply_bilinear_filter << <grid_size, block_size>> > (d_scale_params, 0, y_offset);
global::apply_nearest_neighbor << <grid_size, block_size>> > (d_scale_params, y_offset, new_height);
blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
apply_sobel_filter << <grid_size, block_size >> > (d_sobel_params, 0, new_height);
// blocks_in_x_direction = (new_width * 16 + block_size.x - 1) / block_size.x;
// blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
//
// grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
//
// sobel_cooperative_groups_tile16_8::apply_sobel_filter << <grid_size, block_size >> > (d_sobel_params);
hipMemcpy(result_image->png_rows[0], d_sobel_params.result_bytes_sequential_p, result_image_size_in_bytes, hipMemcpyDeviceToHost);
break;
case kernel_mode::branch_bilinear_nn_dynamic_sobel:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
bilinear_nn_sobel << <grid_size, block_size>> > (d_scale_params, d_sobel_params);
hipMemcpy(result_image->png_rows[0], d_sobel_params.result_bytes_sequential_p, result_image_size_in_bytes, hipMemcpyDeviceToHost);
break;
default:;
}
hipFree(allocated_memory_on_gpu_p);
}
}
| ea81ffcea55e7a55b4543c88c7fe3f61aed1af36.cu | #include <cstdlib>
#include "cuda_scale_image.cuh"
#include "cuda_includes.h"
#include "common_structs.cuh"
#include "cuda_bilinear.cuh"
#include "cuda_kernels.cuh"
#include "cuda_kernel_sobel.cuh"
#include "cuda_nearest_neighbor.cuh"
#include "cuda_scale_pixel_precalculation.cuh"
namespace cuda_seq
{
void scale_and_apply_sobel(png_user_struct* result_image, png_user_struct* source_image, pixel_precalculation* d_x_pixel_precalculation_ptr,
pixel_precalculation* d_y_pixel_precalculation_ptr, kernel_mode kernel_mode);
void scale_image_apply_sobel(png_user_struct* source_image, png_user_struct* result_image, kernel_mode kernel_mode)
{
const auto old_height = source_image->image_info.height;
const auto new_height = result_image->image_info.height;
const auto old_width = source_image->image_info.width;
const auto new_width = result_image->image_info.width;
pixel_precalculation_memory precalculation_xy;
create_pixel_precalculation(&precalculation_xy, old_width, new_width, old_height, new_height);
scale_and_apply_sobel(result_image, source_image, precalculation_xy.d_x, precalculation_xy.d_y, kernel_mode);
cudaFree(precalculation_xy.allocated_gpu_memory);
}
void scale_and_apply_sobel(png_user_struct* result_image, png_user_struct* source_image, pixel_precalculation* d_x_pixel_precalculation_ptr,
pixel_precalculation* d_y_pixel_precalculation_ptr, kernel_mode kernel_mode)
{
auto source_width = source_image->image_info.width;
auto source_height = source_image->image_info.height;
auto new_height = result_image->image_info.height;
auto new_width = result_image->image_info.width;
auto dimensions_size_in_bytes = sizeof(dimensions_info);
auto source_png_size_in_bytes = sizeof(png_byte) * source_width * source_height;
auto result_image_size_in_bytes = sizeof(png_byte) * new_width * new_height;
auto sobel_image_size_in_bytes = result_image_size_in_bytes;
auto needed_memory_in_bytes = dimensions_size_in_bytes + source_png_size_in_bytes + result_image_size_in_bytes + sobel_image_size_in_bytes;
_int8* allocated_memory_on_gpu_p;
cudaMalloc(reinterpret_cast<void**>(&allocated_memory_on_gpu_p), needed_memory_in_bytes);
d_scale_params d_scale_params;
d_sobel_params d_sobel_params;
d_scale_params.x_precalculation_p = d_x_pixel_precalculation_ptr;
d_scale_params.y_precalculation_p = d_y_pixel_precalculation_ptr;
d_scale_params.dimensions_info_p = reinterpret_cast<dimensions_info*>(allocated_memory_on_gpu_p);
d_scale_params.source_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes);
d_scale_params.result_image_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes + source_png_size_in_bytes);
d_sobel_params.source_bytes_sequential_p = d_scale_params.result_image_bytes_sequential_p;
d_sobel_params.result_bytes_sequential_p = reinterpret_cast<png_bytep>(allocated_memory_on_gpu_p + dimensions_size_in_bytes + source_png_size_in_bytes + result_image_size_in_bytes);
//pp_array that contains the source image needs to be flattened for fast memory allocation on gpu
png_bytep png_source_bytes_p = png_util_create_flat_bytes_p_from_row_pp(source_image->png_rows, source_width, source_height, source_png_size_in_bytes);
cudaMemcpy(d_scale_params.source_bytes_sequential_p, png_source_bytes_p, source_png_size_in_bytes, cudaMemcpyHostToDevice);
dimensions_info dimensions_inf;
dimensions_inf.result_image_width = new_width;
dimensions_inf.result_image_height = new_height;
dimensions_inf.source_image_width = source_width;
cudaMemcpy(d_scale_params.dimensions_info_p, &dimensions_inf, sizeof(dimensions_info), cudaMemcpyHostToDevice);
d_sobel_params.dimensions_inf_p = d_scale_params.dimensions_info_p;
dim3 block_size(32, 32);
unsigned blocks_in_x_direction;
unsigned blocks_in_y_direction;
dim3 grid_size;
unsigned y_offset = new_height / 2;
switch (kernel_mode) {
case kernel_mode::bilinear_nn:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = ((new_height / 2) + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
global::apply_bilinear_filter << <grid_size, block_size>> > (d_scale_params, 0, y_offset);
global::apply_nearest_neighbor << <grid_size, block_size>> > (d_scale_params, y_offset, new_height);
cudaMemcpy(result_image->png_rows[0], d_scale_params.result_image_bytes_sequential_p, result_image_size_in_bytes, cudaMemcpyDeviceToHost);
break;
case kernel_mode::bilinear_nn_sobel:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = ((new_height / 2) + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
global::apply_bilinear_filter << <grid_size, block_size>> > (d_scale_params, 0, y_offset);
global::apply_nearest_neighbor << <grid_size, block_size>> > (d_scale_params, y_offset, new_height);
blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
apply_sobel_filter << <grid_size, block_size >> > (d_sobel_params, 0, new_height);
// blocks_in_x_direction = (new_width * 16 + block_size.x - 1) / block_size.x;
// blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
//
// grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
//
// sobel_cooperative_groups_tile16_8::apply_sobel_filter << <grid_size, block_size >> > (d_sobel_params);
cudaMemcpy(result_image->png_rows[0], d_sobel_params.result_bytes_sequential_p, result_image_size_in_bytes, cudaMemcpyDeviceToHost);
break;
case kernel_mode::branch_bilinear_nn_dynamic_sobel:
blocks_in_x_direction = (new_width + block_size.x - 1) / block_size.x;
blocks_in_y_direction = (new_height + block_size.y - 1) / block_size.y;
grid_size = dim3(blocks_in_x_direction, blocks_in_y_direction);
bilinear_nn_sobel << <grid_size, block_size>> > (d_scale_params, d_sobel_params);
cudaMemcpy(result_image->png_rows[0], d_sobel_params.result_bytes_sequential_p, result_image_size_in_bytes, cudaMemcpyDeviceToHost);
break;
default:;
}
cudaFree(allocated_memory_on_gpu_p);
}
}
|
138456a02393acc703f21a75e3f887ffce44949f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3.h"
#include <cstdio>
__device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; }
__global__ void SimpleClone(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
const int n_curt = curt-wt,
w_curt = curt-1,
e_curt = curt+1,
s_curt = curt+wt;
if ( xt >= 0 and xt < wt-1 and yt > 0 and yt < ht-1 and mask[curt] > 127.0f ) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
const int n_curb = curb-wb,
w_curb = curb-1,
e_curb = curb+1,
s_curb = curb+wb;
if ( yb < hb and xb < wb ) {
int n_px = 4;
float sur_output[3] = {0.0f, 0.0f, 0.0f},
sur_target[3] = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < 3; i++) {
n_px = 4;
if (yb > 0) {
sur_output[i] += output[n_curb*3+i];
sur_target[i] += target[n_curt*3+i];
}
else
n_px--;
if (xb > 0) {
sur_output[i] += output[w_curb*3+i];
sur_target[i] += target[w_curt*3+i];
}
else
n_px--;
if (xb < wb-1) {
sur_output[i] += output[e_curb*3+i];
sur_target[i] += target[e_curt*3+i];
}
else
n_px--;
if (yb < hb-1) {
sur_output[i] += output[s_curb*3+i];
sur_target[i] += target[s_curt*3+i];
}
else
n_px--;
output[curb*3+i] = target[curt*3+i] + (sur_output[i]-sur_target[i])/n_px;
}
}
}
}
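// For clarity: each launch of SimpleClone above performs one in-place relaxation
// sweep of the Poisson blending update for every masked pixel p, where N(p) is the
// set of its in-bounds neighbors:
//
//   output[p] = target[p] + ( sum_{q in N(p)} output[q] - sum_{q in N(p)} target[q] ) / |N(p)|
//
// so the host loop in PoissonImageCloning below simply iterates this fixed-point update.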
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice);
for ( auto i = 0; i < 17754; ++i ) {
hipLaunchKernelGGL(( SimpleClone), dim3(dim3(CeilDiv(wt,32), CeilDiv(ht,16))), dim3(dim3(32,16)), 0, 0,
background, target, mask, output,
wb, hb, wt, ht, oy, ox
);
}
} | 138456a02393acc703f21a75e3f887ffce44949f.cu | #include "lab3.h"
#include <cstdio>
__device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; }
__global__ void SimpleClone(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
const int n_curt = curt-wt,
w_curt = curt-1,
e_curt = curt+1,
s_curt = curt+wt;
if ( xt >= 0 and xt < wt-1 and yt > 0 and yt < ht-1 and mask[curt] > 127.0f ) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
const int n_curb = curb-wb,
w_curb = curb-1,
e_curb = curb+1,
s_curb = curb+wb;
if ( yb < hb and xb < wb ) {
int n_px = 4;
float sur_output[3] = {0.0f, 0.0f, 0.0f},
sur_target[3] = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < 3; i++) {
n_px = 4;
if (yb > 0) {
sur_output[i] += output[n_curb*3+i];
sur_target[i] += target[n_curt*3+i];
}
else
n_px--;
if (xb > 0) {
sur_output[i] += output[w_curb*3+i];
sur_target[i] += target[w_curt*3+i];
}
else
n_px--;
if (xb < wb-1) {
sur_output[i] += output[e_curb*3+i];
sur_target[i] += target[e_curt*3+i];
}
else
n_px--;
if (yb < hb-1) {
sur_output[i] += output[s_curb*3+i];
sur_target[i] += target[s_curt*3+i];
}
else
n_px--;
output[curb*3+i] = target[curt*3+i] + (sur_output[i]-sur_target[i])/n_px;
}
}
}
}
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice);
for ( auto i = 0; i < 17754; ++i ) {
SimpleClone<<<dim3(CeilDiv(wt,32), CeilDiv(ht,16)), dim3(32,16)>>>(
background, target, mask, output,
wb, hb, wt, ht, oy, ox
);
}
} |
948d03abb2866c677257084e2090bb27b6c1d978.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <cfloat>
//VERSION 0.8 MODIFIED 10/25/16 12:34 by Jack
// The number of threads per blocks in the kernel
// (if we define it here, then we can use its value in the kernel,
// for example to statically declare an array in shared memory)
const int threads_per_block = 256;
// Forward function declarations
float GPU_vector_max(float *A, int N, int kernel_code, float *kernel_time, float *transfer_time);
float CPU_vector_max(float *A, int N);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void checkError();
// Main program
int main(int argc, char **argv) {
//default kernel
int kernel_code = 1;
// Parse vector length and kernel options
int N;
if(argc == 2) {
N = atoi(argv[1]); // user-specified value
} else if (argc == 4 && !strcmp(argv[2], "-k")) {
N = atoi(argv[1]); // user-specified value
kernel_code = atoi(argv[3]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./vector_max <vector_length> -k <kernel_code>");
}
// Seed the random generator (use a constant here for repeatable results)
srand(10);
// Generate a random vector
// You can use "get_increasing_vector()" for debugging
long long vector_start_time = start_timer();
float *vec = get_random_vector(N);
//float *vec = get_increasing_vector(N);
stop_timer(vector_start_time, "Vector generation");
// Compute the max on the GPU
float GPU_kernel_time;
float transfer_time;
long long GPU_start_time = start_timer();
float result_GPU = GPU_vector_max(vec, N, kernel_code, &GPU_kernel_time, &transfer_time);
long long GPU_time = stop_timer(GPU_start_time, "\t Total");
printf("\tTotal Kernel Time: %f sec\n", GPU_kernel_time);
// Compute the max on the CPU
long long CPU_start_time = start_timer();
float result_CPU = CPU_vector_max(vec, N);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
// Free vector
hipFree(vec);
// Compute the speedup or slowdown
//// Not including data transfer
if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
//// Including data transfer
if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
// Check the correctness of the GPU results
int wrong = result_CPU != result_GPU;
// Report the correctness results
if(wrong) printf("GPU output %f did not match CPU output %f\n", result_GPU, result_CPU);
}
// A GPU kernel that computes the maximum value of a vector
// (each lead thread (threadIdx.x == 0) computes a single value)
__global__ void vector_max_kernel(float *in, float *out, int N) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
int thread_id = blockDim.x * block_id + threadIdx.x;
// A single "lead" thread in each block finds the maximum value over a range of size threads_per_block
float max = 0.0;
if (threadIdx.x == 0) {
//calculate out of bounds guard
//our block size will be 256, but our vector may not be a multiple of 256!
int end = threads_per_block;
if(thread_id + threads_per_block > N)
end = N - thread_id;
//grab the lead thread's value
max = in[thread_id];
//grab values from all other threads' locations
for(int i = 1; i < end; i++) {
//if larger, replace
if(max < in[thread_id + i])
max = in[thread_id + i];
}
out[block_id] = max;
}
}
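// Illustrative alternative (a sketch only -- it is NOT wired into GPU_vector_max below):
// rather than one lead thread scanning threads_per_block values serially, every thread
// loads one element into shared memory and the block cooperatively tree-reduces to a
// maximum. It assumes the launch uses exactly threads_per_block threads per block and,
// like kernel 1, leaves one partial maximum per block in out[], so a second pass
// (on the device or the host) would still be needed to combine the partials.
__global__ void vector_max_kernel_reduction_sketch(float *in, float *out, int N) {

    // Determine the "flattened" block id and thread id
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;

    // Stage one element per thread in shared memory; out-of-range threads
    // contribute -FLT_MAX so they can never win the comparison
    __shared__ float sdata[threads_per_block];
    sdata[threadIdx.x] = (thread_id < N) ? in[thread_id] : -FLT_MAX;
    __syncthreads();

    // Tree reduction: halve the number of active threads at each step
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride && sdata[threadIdx.x + stride] > sdata[threadIdx.x])
            sdata[threadIdx.x] = sdata[threadIdx.x + stride];
        __syncthreads();
    }

    // The lead thread writes this block's partial maximum
    if (threadIdx.x == 0) out[block_id] = sdata[0];
}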
/////////////////////////////////////////////
// COPY KERNEL ONE AND CREATE NEW KERNELS HERE
/////////////////////////////////////////////
// Returns the maximum value within a vector of length N
float GPU_vector_max(float *in_CPU, int N, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
long long transfer_time = 0;
long long kernel_time = 0;
int vector_size = N * sizeof(float);
// Allocate CPU memory for the result
float *out_CPU;
hipHostMalloc((void **) &out_CPU, vector_size * sizeof(float));
if (out_CPU == NULL) die("Error allocating CPU memory");
// Allocate GPU memory for the inputs and the result
long long memory_start_time = start_timer();
float *in_GPU, *out_GPU;
if (hipMalloc((void **) &in_GPU, vector_size) != hipSuccess) die("Error allocating GPU memory");
if (hipMalloc((void **) &out_GPU, vector_size) != hipSuccess) die("Error allocating GPU memory");
// Transfer the input vectors to GPU memory
hipMemcpy(in_GPU, in_CPU, vector_size, hipMemcpyHostToDevice);
hipDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
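    // Worked example (illustrative): for N = 65535*256 + 1 = 16,776,961 elements,
    // num_blocks = 65536, which exceeds the 65535 per-dimension limit, so the grid
    // is folded into num_blocks_y = 2 rows of num_blocks_x = 32768 blocks (65536 total).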
// Execute the kernel to compute the vector sum on the GPU
long long kernel_start_time;
kernel_start_time = start_timer();
switch(kernel_code){
case 1 :
hipLaunchKernelGGL(( vector_max_kernel) , dim3(grid_size) , dim3(threads_per_block) , 0, 0, in_GPU, out_GPU, N);
break;
case 2 :
//LAUNCH KERNEL FROM PROBLEM 2 HERE
die("KERNEL 2 NOT IMPLEMENTED YET\n");
break;
case 3 :
//LAUNCH KERNEL FROM PROBLEM 3 HERE
die("KERNEL 3 NOT IMPLEMENTED YET\n");
break;
case 4 :
//LAUNCH KERNEL FROM PROBLEM 4 HERE
die("KERNEL 4 NOT IMPLEMENTED YET\n");
break;
default :
die("INVALID KERNEL CODE\n");
}
hipDeviceSynchronize(); // this is only needed for timing purposes
kernel_time += stop_timer(kernel_start_time, "\t Kernel execution");
checkError();
// Transfer the result from the GPU to the CPU
memory_start_time = start_timer();
    //copy the per-block results back to the host
hipMemcpy(out_CPU, out_GPU, vector_size, hipMemcpyDeviceToHost);
checkError();
hipDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\tTransfer from GPU");
// Free the GPU memory
hipFree(in_GPU);
hipFree(out_GPU);
float max = out_CPU[0];
hipFree(out_CPU);
// fill input pointers with ms runtimes
*kernel_runtime = usToSec(kernel_time);
*transfer_runtime = usToSec(transfer_time);
//return a single statistic
return max;
}
// Returns the maximum value within a vector of length N
float CPU_vector_max(float *vec, int N) {
// find the max
float max;
max = vec[0];
for (int i = 1; i < N; i++) {
if(max < vec[i]) {
max = vec[i];
}
}
// Return a single statistic
return max;
}
// Returns a randomized vector containing N elements
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
hipHostMalloc((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = (float) rand() / (float) rand();
// Return the randomized vector
return V;
}
float *get_increasing_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
hipHostMalloc((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = (float) i;
// Return the randomized vector
return V;
}
void checkError() {
// Check for kernel errors
hipError_t error = hipGetLastError();
if (error) {
char message[256];
sprintf(message, "CUDA error: %s", hipGetErrorString(error));
die(message);
}
}
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// converts a long long microsecond (us) value to float seconds
float usToSec(long long time) {
return ((float)time)/(1000000);
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
float elapsed = usToSec(end_time - start_time);
printf("%s: %.5f sec\n", name, elapsed);
return end_time - start_time;
}
// Prints the specified message and quits
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
| 948d03abb2866c677257084e2090bb27b6c1d978.cu | #include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <cfloat>
//VERSION 0.8 MODIFIED 10/25/16 12:34 by Jack
// The number of threads per block in the kernel
// (if we define it here, then we can use its value in the kernel,
// for example to statically declare an array in shared memory)
const int threads_per_block = 256;
// Forward function declarations
float GPU_vector_max(float *A, int N, int kernel_code, float *kernel_time, float *transfer_time);
float CPU_vector_max(float *A, int N);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void checkError();
// Main program
int main(int argc, char **argv) {
//default kernel
int kernel_code = 1;
// Parse vector length and kernel options
int N;
if(argc == 2) {
N = atoi(argv[1]); // user-specified value
} else if (argc == 4 && !strcmp(argv[2], "-k")) {
N = atoi(argv[1]); // user-specified value
kernel_code = atoi(argv[3]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./vector_max <vector_length> -k <kernel_code>");
}
// Seed the random generator (use a constant here for repeatable results)
srand(10);
// Generate a random vector
// You can use "get_increasing_vector()" for debugging
long long vector_start_time = start_timer();
float *vec = get_random_vector(N);
//float *vec = get_increasing_vector(N);
stop_timer(vector_start_time, "Vector generation");
// Compute the max on the GPU
float GPU_kernel_time;
float transfer_time;
long long GPU_start_time = start_timer();
float result_GPU = GPU_vector_max(vec, N, kernel_code, &GPU_kernel_time, &transfer_time);
long long GPU_time = stop_timer(GPU_start_time, "\t Total");
printf("\tTotal Kernel Time: %f sec\n", GPU_kernel_time);
// Compute the max on the CPU
long long CPU_start_time = start_timer();
float result_CPU = CPU_vector_max(vec, N);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
// Free vector
cudaFreeHost(vec); // vec comes from cudaMallocHost in get_random_vector
// Compute the speedup or slowdown
//// Not including data transfer
if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
//// Including data transfer
if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
// Check the correctness of the GPU results
int wrong = result_CPU != result_GPU;
// Report the correctness results
if(wrong) printf("GPU output %f did not match CPU output %f\n", result_GPU, result_CPU);
}
// A GPU kernel that computes the maximum value of a vector
// (each lead thread (threadIdx.x == 0) computes a single value per block)
__global__ void vector_max_kernel(float *in, float *out, int N) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
int thread_id = blockDim.x * block_id + threadIdx.x;
// A single "lead" thread in each block finds the maximum value over a range of size threads_per_block
float max = 0.0;
if (threadIdx.x == 0) {
//calculate out of bounds guard
//our block size will be 256, but our vector may not be a multiple of 256!
int end = threads_per_block;
if(thread_id + threads_per_block > N)
end = N - thread_id;
//grab the lead thread's value
max = in[thread_id];
//grab values from all other threads' locations
for(int i = 1; i < end; i++) {
//if larger, replace
if(max < in[thread_id + i])
max = in[thread_id + i];
}
out[block_id] = max;
}
}
/////////////////////////////////////////////
// COPY KERNEL ONE AND CREATE NEW KERNELS HERE
/////////////////////////////////////////////
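// A minimal sketch of the kind of kernel this placeholder asks for, assuming the
// same in/out layout and flattened block id as kernel 1. It is NOT wired into the
// switch in GPU_vector_max() (kernel codes 2-4 still die()), and the name
// vector_max_kernel_shared is made up purely for illustration. It shows the
// shared-memory tree reduction hinted at by the threads_per_block comment above:
// each thread loads one element, then the number of active threads is halved
// every step until thread 0 holds the block maximum.
__global__ void vector_max_kernel_shared(float *in, float *out, int N) {
    __shared__ float cache[threads_per_block];

    // Same flattened indexing as kernel 1
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;

    // Out-of-range threads contribute 0.0f, which is safe here because the
    // randomized input values are non-negative
    cache[threadIdx.x] = (thread_id < N) ? in[thread_id] : 0.0f;
    __syncthreads();

    // Tree reduction in shared memory
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride && cache[threadIdx.x + stride] > cache[threadIdx.x])
            cache[threadIdx.x] = cache[threadIdx.x + stride];
        __syncthreads();
    }

    // One result per block, exactly like kernel 1
    if (threadIdx.x == 0) out[block_id] = cache[0];
}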
// Returns the maximum value within a vector of length N
float GPU_vector_max(float *in_CPU, int N, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
long long transfer_time = 0;
long long kernel_time = 0;
int vector_size = N * sizeof(float);
// Allocate CPU memory for the result
float *out_CPU;
cudaMallocHost((void **) &out_CPU, vector_size * sizeof(float));
if (out_CPU == NULL) die("Error allocating CPU memory");
// Allocate GPU memory for the inputs and the result
long long memory_start_time = start_timer();
float *in_GPU, *out_GPU;
if (cudaMalloc((void **) &in_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
if (cudaMalloc((void **) &out_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
// Transfer the input vectors to GPU memory
cudaMemcpy(in_GPU, in_CPU, vector_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Execute the kernel to compute the vector max on the GPU
long long kernel_start_time;
kernel_start_time = start_timer();
switch(kernel_code){
case 1 :
vector_max_kernel <<< grid_size , threads_per_block >>> (in_GPU, out_GPU, N);
break;
case 2 :
//LAUNCH KERNEL FROM PROBLEM 2 HERE
die("KERNEL 2 NOT IMPLEMENTED YET\n");
break;
case 3 :
//LAUNCH KERNEL FROM PROBLEM 3 HERE
die("KERNEL 3 NOT IMPLEMENTED YET\n");
break;
case 4 :
//LAUNCH KERNEL FROM PROBLEM 4 HERE
die("KERNEL 4 NOT IMPLEMENTED YET\n");
break;
default :
die("INVALID KERNEL CODE\n");
}
cudaDeviceSynchronize(); // this is only needed for timing purposes
kernel_time += stop_timer(kernel_start_time, "\t Kernel execution");
checkError();
// Transfer the result from the GPU to the CPU
memory_start_time = start_timer();
//copy C back
cudaMemcpy(out_CPU, out_GPU, vector_size, cudaMemcpyDeviceToHost);
checkError();
cudaDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\tTransfer from GPU");
// Free the GPU memory
cudaFree(in_GPU);
cudaFree(out_GPU);
float max = out_CPU[0];
cudaFreeHost(out_CPU); // out_CPU was allocated with cudaMallocHost, so use the host-side free
// fill the caller-provided output pointers with runtimes in seconds
*kernel_runtime = usToSec(kernel_time);
*transfer_runtime = usToSec(transfer_time);
//return a single statistic
return max;
}
// Returns the maximum value within a vector of length N
float CPU_vector_max(float *vec, int N) {
// find the max
float max;
max = vec[0];
for (int i = 1; i < N; i++) {
if(max < vec[i]) {
max = vec[i];
}
}
// Return a single statistic
return max;
}
// Returns a randomized vector containing N elements
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
cudaMallocHost((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = (float) rand() / (float) rand();
// Return the randomized vector
return V;
}
float *get_increasing_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
cudaMallocHost((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with increasing values
for (int i = 0; i < N; i++) V[i] = (float) i;
// Return the increasing vector
return V;
}
void checkError() {
// Check for kernel errors
cudaError_t error = cudaGetLastError();
if (error) {
char message[256];
sprintf(message, "CUDA error: %s", cudaGetErrorString(error));
die(message);
}
}
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// converts a long long microsecond value to float seconds
float usToSec(long long time) {
return ((float)time)/(1000000);
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
float elapsed = usToSec(end_time - start_time);
printf("%s: %.5f sec\n", name, elapsed);
return end_time - start_time;
}
// Prints the specified message and quits
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
|
65aee0043ade043ed000d1dd588f5c08b38fd1df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Luigy Machaca Arcana
// Computer science - Arequipa, Perú 2017
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
using namespace std;
#define WIDTH_TILE 32
// CPU reference convolution; the kernel matrix is double, matching the matrix allocated in main()
void convolution_serial(int** dd_mat_a, int n_rows_a, int n_cols_a, double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
//void conv2(arma::mat (&in), int n, int m, arma::mat (&conv), int p, int q, arma::mat(&out)){
for(int i=0 ; i<n_rows_a ; i++){
for(int j=0 ; j<n_cols_a ; j++){
double offset = 0;
for(int k=0 ; k<n_rows_b ; k++){
for(int l=0 ; l<n_cols_b ; l++){
//double cc = conv.at(k,l);
double cc = dd_mat_b[k][l];
double dd = 0;
if( (i-(int)(n_rows_b/2)+k)>=0 && (j-(int)(n_cols_b/2)+l)>=0 &&
(i-(int)(n_rows_b/2)+k)<n_rows_a && (j-(int)(n_cols_b/2)+l)<n_cols_a ){
dd = dd_mat_a[i-(int)(n_rows_b/2)+k][j-(int)(n_cols_b/2)+l];
//dd = in.at( i-(int)(p/2)+k , j-(int)(q/2)+l );
}
offset += cc*dd;
}
}
//out.at(i,j) = offset>0? offset:0;
dd_mat_c[i][j] = offset>0? offset:0;
//dd_mat_c[i][j] =-3;
}
}
}
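// GPU convolution: one thread per output pixel; only interior pixels (at least
// half a kernel width away from every border) are computed, and a ReLU
// (max(offset,0)) is applied before the result is stored.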
__global__
void convolution(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int n_kernel_row = n_rows_b; //n_cols_b
int n_kernel_col = n_cols_b; //n_cols_b
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if( ((int)(n_kernel_row/2)-1)< row && row<(n_rows_a-(int)(n_kernel_row/2)) &&
((int)(n_kernel_col/2)-1)< col && col<(n_cols_a-(int)(n_kernel_col/2)) ){
double offset = 0;
for(int k=0 ; k<n_kernel_row ; k++){
for(int l=0 ; l<n_kernel_col ; l++){
double cc = dd_mat_b[k][l];
double dd = 0;
dd = (double)dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
offset += cc*dd;
}
}
offset = offset>0?offset:0;
dd_mat_c[row][col] = offset;
//dd_mat_c[row][col] = dd_mat_a[row][col];
}
}
__global__
void convolution_complete(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int n_kernel_row = n_rows_b; //n_cols_b
int n_kernel_col = n_cols_b; //n_cols_b
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if( row<n_rows_a && col<n_cols_a ){
double offset = 0;
for(int k=0 ; k<n_kernel_row ; k++){
for(int l=0 ; l<n_kernel_col ; l++){
double cc = dd_mat_b[k][l];
double dd = 0;
//dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
if( (row-(int)(n_kernel_row/2)+k)>=0 && (row-(int)(n_kernel_row/2)+k)<n_rows_a &&
(col-(int)(n_kernel_col/2)+l)>=0 && (col-(int)(n_kernel_col/2)+l)<n_cols_a ){
dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
}
offset += cc*dd;
}
}
offset = (-1.0/256.0)*offset; // avoid integer division: -1/256 would truncate to 0
offset = offset>0?offset:0;
offset = (int)offset%255 + 1;
dd_mat_c[row][col] = offset;
//dd_mat_c[row][col] = -1;
}
}
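// Tiled matrix multiplication C = A*B using WIDTH_TILE x WIDTH_TILE shared-memory
// tiles; kept for reference, the corresponding call in main() is commented out.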
__global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
__shared__ int Mds[WIDTH_TILE][WIDTH_TILE];
__shared__ int Nds[WIDTH_TILE][WIDTH_TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int value = 0;
int row = by*WIDTH_TILE + ty;
int col = bx*WIDTH_TILE + tx;
int width = n_cols_a; //n_cols_a == n_rows_b
int k;
for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){
if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){
Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx];
}
else{
Mds[ty][tx] = 0;
}
if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){
Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col];
}
else{
Nds[ty][tx] = 0;
}
__syncthreads();
int m;
for(m=0 ; m<WIDTH_TILE ; ++m){
value += Mds[ty][m]*Nds[m][tx];
}
__syncthreads();
}
if(row<n_rows_c && col<n_cols_c){
dd_mat_c[row][col]=value;
}
}
__global__ void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int value=0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int x = tx + blockIdx.x*blockDim.x;
int y = ty + blockIdx.y*blockDim.y;
if( y<n_rows_c && x<n_cols_c ){
int i;
for(i=0 ; i<n_cols_a ; i++){
value += dd_mat_a[y][i] * dd_mat_b[i][x];
}
dd_mat_c[y][x]=value;
}
}
void fill(int** mat, int n, int m){
srand(time(0));
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
//mat[i][j] = rand()%3+1;
mat[i][j] = 1;
}
}
void fill_value(int** mat,int n, int m, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<m;j++)
mat[i][j] = value;
}
void print(int** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%d ",mat[i][j]);
printf("\n");
}
}
void print2(double** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%f ",mat[i][j]);
printf("\n");
}
}
double max_value_matrix(int** mat,int n, int m){
int i,j;
int max = -100000;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++){
max = (mat[i][j] > max)?mat[i][j]:max;
}
}
return max;
}
void normalize(int** mat,int n, int m, double value_normalice){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++){
mat[i][j] = mat[i][j] / (double)value_normalice ;
}
}
}
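// Mirrors an existing host matrix on the device: one contiguous data block plus a
// device-resident array of row pointers, so kernels can keep using mat[i][j] indexing.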
void create_copy(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols){
int i;
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
hipMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
hipMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,hipMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
hipMalloc((void***)& dd_mat, size_row );
hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice );
}
void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){
int i;
mat = (int** )malloc(sizeof(int*) * n_rows );
mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
if(fillValue==-1){
fill(mat,n_rows,n_cols);
}
else{
fill_value(mat,n_rows,n_cols,fillValue);
}
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
hipMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
hipMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,hipMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
hipMalloc((void***)& dd_mat, size_row );
hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice );
}
void create_kernell_random(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols){
int i,j;
mat = (double** )malloc(sizeof(double*) * n_rows );
mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
srand(time(0));
for(i=0; i<n_rows ;i++){
for(j=0; j<n_cols ;j++){
mat[i][j] = (double)(rand()%100-50);
//mat[i][j] = 1;
}
}
int size_row = sizeof(double*) * n_rows;
d_mat = (double**) malloc(size_row);
hipMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols );
hipMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,hipMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
hipMalloc((void***)& dd_mat, size_row );
hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice );
}
/////////////////////////////////////////////////////////////////////////
///////////////// Filter Edge detection
/////////////////////////////////////////////////////////////////////////
void fill_kernel_3x3_Edge_detection(double** mat, int n, int m, double scalar_kernel=1){
mat[0][0]=0; mat[0][1]= 1; mat[0][2]=0;
mat[1][0]=1; mat[1][1]=-4; mat[1][2]=1;
mat[2][0]=0; mat[2][1]= 1; mat[2][2]=0;
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j]=scalar_kernel*mat[i][j];
}
}
}
/////////////////////////////////////////////////////////////////////////
///////////////// Filter Sharpen
/////////////////////////////////////////////////////////////////////////
void fill_kernel_3x3_Sharpen(double** mat, int n, int m, double scalar_kernel=1){
// 0 -1 0
//-1 5 -1
// 0 -1 0
mat[0][0]=0; mat[0][1]=-1; mat[0][2]=0;
mat[1][0]=-1; mat[1][1]=5; mat[1][2]=-1;
mat[2][0]=0; mat[2][1]=-1; mat[2][2]=0;
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j]=scalar_kernel*mat[i][j];
}
}
}
/////////////////////////////////////////////////////////////////////////
///////////////// Gaussian blur
/////////////////////////////////////////////////////////////////////////
void fill_kernel_5x5_Gaussian_blur(double** mat, int n, int m, double scalar_kernel=1){
// 1 4 6 4 1
// 4 16 24 16 4
//(-1/256) // 6 24 -476 24 6
// 4 16 24 16 4
// 1 4 6 4 1
mat[0][0]=1; mat[0][1]=4 ; mat[0][2]=6 ; mat[0][3]=4 ; mat[0][4]=1;
mat[1][0]=4; mat[1][1]=16; mat[1][2]=24 ; mat[1][3]=16; mat[1][4]=4;
mat[2][0]=6; mat[2][1]=24; mat[2][2]=-476; mat[2][3]=24; mat[2][4]=6;
mat[3][0]=4; mat[3][1]=16; mat[3][2]=24 ; mat[3][3]=16; mat[3][4]=4;
mat[4][0]=1; mat[4][1]=4 ; mat[4][2]=6 ; mat[4][3]=4 ; mat[4][4]=1;
printf("2222xxxxxxx %.25f\n",scalar_kernel);
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j] = scalar_kernel*mat[i][j];
}
}
}
void create_kernell_static(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols, double scalar_kernel=1){
int i;
mat = (double** )malloc(sizeof(double*) * n_rows );
mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
fill_kernel_3x3_Edge_detection(mat,n_rows,n_cols, scalar_kernel);
//fill_kernel_3x3_Sharpen(mat,n_rows,n_cols, scalar_kernel);
//fill_kernel_5x5_Gaussian_blur(mat,n_rows,n_cols, scalar_kernel);
int size_row = sizeof(double*) * n_rows;
d_mat = (double**) malloc(size_row);
hipMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols );
hipMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,hipMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
hipMalloc((void***)& dd_mat, size_row );
hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice );
}
int main(int argc, char *argv[]){
printf("//////////////////////////////////\n");
char temp1[350];
strcpy (temp1 , argv[1]);
const char* img_input_name = temp1;
char temp2[150];
strcpy (temp2 , argv[1]);
strcat (temp2 , ".out.random.kernel.EdgeDetec.pgm");
const char* img_output_name = temp2;
printf ("name in: %s\n",img_input_name);
printf ("name out: %s\n",img_output_name);
string title1,title2;
char rows[15];
char cols[15];
char max_val[15];
int n_rows = -1;
int n_cols = -1;
//int max_value = -1;
/////////////////////////////////////////////////////////////
ifstream myReadFile;
myReadFile.open(img_input_name);
char out_temp[100];
int** mat_a;
if (myReadFile.is_open()){
std::getline(myReadFile,title1);
std::getline(myReadFile,title2);
myReadFile >> cols;
n_cols = atoi(cols);
//n_cols = 15;
//cout << n_cols << endl;
myReadFile >> rows;
n_rows = atoi(rows);
//n_rows = 15;
//cout << n_rows << endl;
myReadFile >> max_val;
//max_value = atoi(max_val);
//cout << max_value << endl;
/////////////////////////////////////////////////////////////
mat_a = (int** )malloc(sizeof(int*) * n_rows );
mat_a[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( int i=1 ; i<n_rows ; i++ ){
mat_a[i] = mat_a[i-1]+n_cols;
}
/////////////////////////////////////////////////////////////
int n_temp;
for(int i=0 ; i<n_rows ; i++){
for(int j=0 ; j<n_cols ; j++){
if(!myReadFile.eof()){
myReadFile >> out_temp;
n_temp = atoi(out_temp);
mat_a[i][j] = n_temp;
//cout << n_temp << endl;
}
}
}
}
myReadFile.close();
/////////////////////////////////////////////////////
int n_rows_a = n_rows;
int n_cols_a = n_cols;
int n_rows_b = 3; //n_kernel
int n_cols_b = 3; //n_kernel
//double scalar_kernel = (-1)/(double)256; // kernel scaling factor
double scalar_kernel = 1; // only used with the static kernel
//printf("escalar_kernel: %f\n",scalar_kernel);
int n_rows_c = n_rows;
int n_cols_c = n_cols;
//int** mat_a; int** d_mat_a; int** dd_mat_a;
//int** mat_a;
int** d_mat_a; int** dd_mat_a;
double** mat_b; double** d_mat_b; double** dd_mat_b;
int** mat_c; int** d_mat_c; int** dd_mat_c;
create_copy( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a);
//create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a );
create_kernell_static( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b, scalar_kernel );
//create_kernell_random( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b );
create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 );
/////////////////////////////////////////
dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1);
dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1);
printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x);
printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y );
////////////////////////////////////////////////////
hipLaunchKernelGGL(( convolution), dim3(grid),dim3(blockNum), 0, 0, dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
convolution_serial(mat_a, n_rows_a, n_cols_a, mat_b, n_rows_b, n_cols_b, mat_c, n_rows_c, n_cols_c);
//convolution_complete<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult_shared<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
/////////////////////////////////////////////////////
hipMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,hipMemcpyDeviceToHost);
//printf("//////////////////////////////////\n");
//printf("//////////////////////////////////\n");
//print(mat_a,n_rows_a,n_cols_a);
printf("//////// KERNELL SHARPEN //////////\n");
print2(mat_b,n_rows_b,n_cols_b);
printf("//////////////////////////////////\n");
//print(mat_c,n_rows_c,n_cols_c);
//////////////////////////////////////////////
double max_matrix = max_value_matrix(mat_c, n_rows_c, n_cols_c);
//printf("<<<<<<<<<<<<<<<<<<<<<%f\n",max_matrix);
ofstream myfile;
myfile.open (img_output_name);
myfile << title1 <<endl;
myfile << title2 <<endl;
myfile << n_cols_c <<" "<< n_rows_c <<endl;
//myfile << max_value <<endl;
myfile << max_matrix <<endl;
for(int i=0 ; i<n_rows_c ; i++){
for(int j=0 ; j<n_cols_c ; j++){
myfile << mat_c[i][j] <<endl;
}
}
myfile.close();
//////////////////////////////////////////////
hipFree(dd_mat_a);
hipFree(dd_mat_b);
hipFree(dd_mat_c);
hipFree(d_mat_a);
hipFree(d_mat_b);
hipFree(d_mat_c);
free(mat_a);
free(mat_b);
free(mat_c);
return 0;
}
| 65aee0043ade043ed000d1dd588f5c08b38fd1df.cu | // Luigy Machaca Arcana
// Computer science - Arequipa, Perú 2017
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string>
using namespace std;
#define WIDTH_TILE 32
// CPU reference convolution; the kernel matrix is double, matching the matrix allocated in main()
void convolution_serial(int** dd_mat_a, int n_rows_a, int n_cols_a, double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
//void conv2(arma::mat (&in), int n, int m, arma::mat (&conv), int p, int q, arma::mat(&out)){
for(int i=0 ; i<n_rows_a ; i++){
for(int j=0 ; j<n_cols_a ; j++){
double offset = 0;
for(int k=0 ; k<n_rows_b ; k++){
for(int l=0 ; l<n_cols_b ; l++){
//double cc = conv.at(k,l);
double cc = dd_mat_b[k][l];
double dd = 0;
if( (i-(int)(n_rows_b/2)+k)>=0 && (j-(int)(n_cols_b/2)+l)>=0 &&
(i-(int)(n_rows_b/2)+k)<n_rows_a && (j-(int)(n_cols_b/2)+l)<n_cols_a ){
dd = dd_mat_a[i-(int)(n_rows_b/2)+k][j-(int)(n_cols_b/2)+l];
//dd = in.at( i-(int)(p/2)+k , j-(int)(q/2)+l );
}
offset += cc*dd;
}
}
//out.at(i,j) = offset>0? offset:0;
dd_mat_c[i][j] = offset>0? offset:0;
//dd_mat_c[i][j] =-3;
}
}
}
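// GPU convolution: one thread per output pixel; only interior pixels (at least
// half a kernel width away from every border) are computed, and a ReLU
// (max(offset,0)) is applied before the result is stored.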
__global__
void convolution(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int n_kernel_row = n_rows_b; //n_cols_b
int n_kernel_col = n_cols_b; //n_cols_b
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if( ((int)(n_kernel_row/2)-1)< row && row<(n_rows_a-(int)(n_kernel_row/2)) &&
((int)(n_kernel_col/2)-1)< col && col<(n_cols_a-(int)(n_kernel_col/2)) ){
double offset = 0;
for(int k=0 ; k<n_kernel_row ; k++){
for(int l=0 ; l<n_kernel_col ; l++){
double cc = dd_mat_b[k][l];
double dd = 0;
dd = (double)dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
offset += cc*dd;
}
}
offset = offset>0?offset:0;
dd_mat_c[row][col] = offset;
//dd_mat_c[row][col] = dd_mat_a[row][col];
}
}
__global__
void convolution_complete(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int n_kernel_row = n_rows_b; //n_cols_b
int n_kernel_col = n_cols_b; //n_cols_b
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if( row<n_rows_a && col<n_cols_a ){
double offset = 0;
for(int k=0 ; k<n_kernel_row ; k++){
for(int l=0 ; l<n_kernel_col ; l++){
double cc = dd_mat_b[k][l];
double dd = 0;
//dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
if( (row-(int)(n_kernel_row/2)+k)>=0 && (row-(int)(n_kernel_row/2)+k)<n_rows_a &&
(col-(int)(n_kernel_col/2)+l)>=0 && (col-(int)(n_kernel_col/2)+l)<n_cols_a ){
dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l];
}
offset += cc*dd;
}
}
offset = (-1.0/256.0)*offset; // avoid integer division: -1/256 would truncate to 0
offset = offset>0?offset:0;
offset = (int)offset%255 + 1;
dd_mat_c[row][col] = offset;
//dd_mat_c[row][col] = -1;
}
}
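// Tiled matrix multiplication C = A*B using WIDTH_TILE x WIDTH_TILE shared-memory
// tiles; kept for reference, the corresponding call in main() is commented out.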
__global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
__shared__ int Mds[WIDTH_TILE][WIDTH_TILE];
__shared__ int Nds[WIDTH_TILE][WIDTH_TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int value = 0;
int row = by*WIDTH_TILE + ty;
int col = bx*WIDTH_TILE + tx;
int width = n_cols_a; //n_cols_a == n_rows_b
int k;
for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){
if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){
Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx];
}
else{
Mds[ty][tx] = 0;
}
if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){
Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col];
}
else{
Nds[ty][tx] = 0;
}
__syncthreads();
int m;
for(m=0 ; m<WIDTH_TILE ; ++m){
value += Mds[ty][m]*Nds[m][tx];
}
__syncthreads();
}
if(row<n_rows_c && col<n_cols_c){
dd_mat_c[row][col]=value;
}
}
__global__ void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int value=0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int x = tx + blockIdx.x*blockDim.x;
int y = ty + blockIdx.y*blockDim.y;
if( y<n_rows_c && x<n_cols_c ){
int i;
for(i=0 ; i<n_cols_a ; i++){
value += dd_mat_a[y][i] * dd_mat_b[i][x];
}
dd_mat_c[y][x]=value;
}
}
void fill(int** mat, int n, int m){
srand(time(0));
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
//mat[i][j] = rand()%3+1;
mat[i][j] = 1;
}
}
void fill_value(int** mat,int n, int m, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<m;j++)
mat[i][j] = value;
}
void print(int** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%d ",mat[i][j]);
printf("\n");
}
}
void print2(double** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%f ",mat[i][j]);
printf("\n");
}
}
double max_value_matrix(int** mat,int n, int m){
int i,j;
int max = -100000;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++){
max = (mat[i][j] > max)?mat[i][j]:max;
}
}
return max;
}
void normalize(int** mat,int n, int m, double value_normalice){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++){
mat[i][j] = mat[i][j] / (double)value_normalice ;
}
}
}
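// Mirrors an existing host matrix on the device: one contiguous data block plus a
// device-resident array of row pointers, so kernels can keep using mat[i][j] indexing.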
void create_copy(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols){
int i;
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
cudaMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
cudaMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,cudaMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
cudaMalloc((void***)& dd_mat, size_row );
cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice );
}
void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){
int i;
mat = (int** )malloc(sizeof(int*) * n_rows );
mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
if(fillValue==-1){
fill(mat,n_rows,n_cols);
}
else{
fill_value(mat,n_rows,n_cols,fillValue);
}
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
cudaMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
cudaMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,cudaMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
cudaMalloc((void***)& dd_mat, size_row );
cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice );
}
void create_kernell_random(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols){
int i,j;
mat = (double** )malloc(sizeof(double*) * n_rows );
mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
srand(time(0));
for(i=0; i<n_rows ;i++){
for(j=0; j<n_cols ;j++){
mat[i][j] = (double)(rand()%100-50);
//mat[i][j] = 1;
}
}
int size_row = sizeof(double*) * n_rows;
d_mat = (double**) malloc(size_row);
cudaMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols );
cudaMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,cudaMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
cudaMalloc((void***)& dd_mat, size_row );
cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice );
}
/////////////////////////////////////////////////////////////////////////
///////////////// Filter Edge detection
/////////////////////////////////////////////////////////////////////////
void fill_kernel_3x3_Edge_detection(double** mat, int n, int m, double scalar_kernel=1){
mat[0][0]=0; mat[0][1]= 1; mat[0][2]=0;
mat[1][0]=1; mat[1][1]=-4; mat[1][2]=1;
mat[2][0]=0; mat[2][1]= 1; mat[2][2]=0;
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j]=scalar_kernel*mat[i][j];
}
}
}
/////////////////////////////////////////////////////////////////////////
///////////////// Filter Sharpen
/////////////////////////////////////////////////////////////////////////
void fill_kernel_3x3_Sharpen(double** mat, int n, int m, double scalar_kernel=1){
// 0 -1 0
//-1 5 -1
// 0 -1 0
mat[0][0]=0; mat[0][1]=-1; mat[0][2]=0;
mat[1][0]=-1; mat[1][1]=5; mat[1][2]=-1;
mat[2][0]=0; mat[2][1]=-1; mat[2][2]=0;
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j]=scalar_kernel*mat[i][j];
}
}
}
/////////////////////////////////////////////////////////////////////////
///////////////// Gaussian blur
/////////////////////////////////////////////////////////////////////////
void fill_kernel_5x5_Gaussian_blur(double** mat, int n, int m, double scalar_kernel=1){
// 1 4 6 4 1
// 4 16 24 16 4
//(-1/256) // 6 24 -476 24 6
// 4 16 24 16 4
// 1 4 6 4 1
mat[0][0]=1; mat[0][1]=4 ; mat[0][2]=6 ; mat[0][3]=4 ; mat[0][4]=1;
mat[1][0]=4; mat[1][1]=16; mat[1][2]=24 ; mat[1][3]=16; mat[1][4]=4;
mat[2][0]=6; mat[2][1]=24; mat[2][2]=-476; mat[2][3]=24; mat[2][4]=6;
mat[3][0]=4; mat[3][1]=16; mat[3][2]=24 ; mat[3][3]=16; mat[3][4]=4;
mat[4][0]=1; mat[4][1]=4 ; mat[4][2]=6 ; mat[4][3]=4 ; mat[4][4]=1;
printf("2222xxxxxxx %.25f\n",scalar_kernel);
for(int i=0 ; i<n ; i++){
for(int j=0 ; j<m ; j++){
mat[i][j] = scalar_kernel*mat[i][j];
}
}
}
void create_kernell_static(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols, double scalar_kernel=1){
int i;
mat = (double** )malloc(sizeof(double*) * n_rows );
mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
fill_kernel_3x3_Edge_detection(mat,n_rows,n_cols, scalar_kernel);
//fill_kernel_3x3_Sharpen(mat,n_rows,n_cols, scalar_kernel);
//fill_kernel_5x5_Gaussian_blur(mat,n_rows,n_cols, scalar_kernel);
int size_row = sizeof(double*) * n_rows;
d_mat = (double**) malloc(size_row);
cudaMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols );
cudaMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,cudaMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
cudaMalloc((void***)& dd_mat, size_row );
cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice );
}
int main(int argc, char *argv[]){
printf("//////////////////////////////////\n");
char temp1[350];
strcpy (temp1 , argv[1]);
const char* img_input_name = temp1;
char temp2[150];
strcpy (temp2 , argv[1]);
strcat (temp2 , ".out.random.kernel.EdgeDetec.pgm");
const char* img_output_name = temp2;
printf ("name in: %s\n",img_input_name);
printf ("name out: %s\n",img_output_name);
string title1,title2;
char rows[15];
char cols[15];
char max_val[15];
int n_rows = -1;
int n_cols = -1;
//int max_value = -1;
/////////////////////////////////////////////////////////////
ifstream myReadFile;
myReadFile.open(img_input_name);
char out_temp[100];
int** mat_a;
if (myReadFile.is_open()){
std::getline(myReadFile,title1);
std::getline(myReadFile,title2);
myReadFile >> cols;
n_cols = atoi(cols);
//n_cols = 15;
//cout << n_cols << endl;
myReadFile >> rows;
n_rows = atoi(rows);
//n_rows = 15;
//cout << n_rows << endl;
myReadFile >> max_val;
//max_value = atoi(max_val);
//cout << max_value << endl;
/////////////////////////////////////////////////////////////
mat_a = (int** )malloc(sizeof(int*) * n_rows );
mat_a[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( int i=1 ; i<n_rows ; i++ ){
mat_a[i] = mat_a[i-1]+n_cols;
}
/////////////////////////////////////////////////////////////
int n_temp;
for(int i=0 ; i<n_rows ; i++){
for(int j=0 ; j<n_cols ; j++){
if(!myReadFile.eof()){
myReadFile >> out_temp;
n_temp = atoi(out_temp);
mat_a[i][j] = n_temp;
//cout << n_temp << endl;
}
}
}
}
myReadFile.close();
/////////////////////////////////////////////////////
int n_rows_a = n_rows;
int n_cols_a = n_cols;
int n_rows_b = 3; //n_kernel
int n_cols_b = 3; //n_kernel
//double scalar_kernel = (-1)/(double)256; // kernel scaling factor
double scalar_kernel = 1; // only used with the static kernel
//printf("escalar_kernel: %f\n",scalar_kernel);
int n_rows_c = n_rows;
int n_cols_c = n_cols;
//int** mat_a; int** d_mat_a; int** dd_mat_a;
//int** mat_a;
int** d_mat_a; int** dd_mat_a;
double** mat_b; double** d_mat_b; double** dd_mat_b;
int** mat_c; int** d_mat_c; int** dd_mat_c;
create_copy( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a);
//create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a );
create_kernell_static( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b, scalar_kernel );
//create_kernell_random( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b );
create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 );
/////////////////////////////////////////
dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1);
dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1);
printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x);
printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y );
////////////////////////////////////////////////////
convolution<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
convolution_serial(mat_a, n_rows_a, n_cols_a, mat_b, n_rows_b, n_cols_b, mat_c, n_rows_c, n_cols_c);
//convolution_complete<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult_shared<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
/////////////////////////////////////////////////////
cudaMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,cudaMemcpyDeviceToHost);
//printf("//////////////////////////////////\n");
//printf("//////////////////////////////////\n");
//print(mat_a,n_rows_a,n_cols_a);
printf("//////// KERNELL SHARPEN //////////\n");
print2(mat_b,n_rows_b,n_cols_b);
printf("//////////////////////////////////\n");
//print(mat_c,n_rows_c,n_cols_c);
//////////////////////////////////////////////
double max_matrix = max_value_matrix(mat_c, n_rows_c, n_cols_c);
//printf("<<<<<<<<<<<<<<<<<<<<<%f\n",max_matrix);
ofstream myfile;
myfile.open (img_output_name);
myfile << title1 <<endl;
myfile << title2 <<endl;
myfile << n_cols_c <<" "<< n_rows_c <<endl;
//myfile << max_value <<endl;
myfile << max_matrix <<endl;
for(int i=0 ; i<n_rows_c ; i++){
for(int j=0 ; j<n_cols_c ; j++){
myfile << mat_c[i][j] <<endl;
}
}
myfile.close();
//////////////////////////////////////////////
cudaFree(dd_mat_a);
cudaFree(dd_mat_b);
cudaFree(dd_mat_c);
cudaFree(d_mat_a);
cudaFree(d_mat_b);
cudaFree(d_mat_c);
free(mat_a);
free(mat_b);
free(mat_c);
return 0;
}
|
80f295d97c703ef145a4a16410894d812d8b9456.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarfbx.cu normal z -> c, Tue Feb 9 16:05:30 2016
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ c,
magmaFloatComplex *dwork)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_C_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_cgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
CGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c,
magmaFloatComplex *dwork, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
if (i == 0)
c[0] = MAGMA_C_ONE;
/* lsum := v**H * C */
lsum = MAGMA_C_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaFloatComplex lsum;
V += j;
lsum = MAGMA_C_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_C_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_clarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr dT, magma_int_t ldt,
magmaFloatComplex_ptr c,
magmaFloatComplex_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_cgemv_kernel1)
, dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_ctrmv_tkernel)
, dim3(k), dim3(k), 0, queue->cuda_stream() ,
dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_cgemv_kernel2)
, dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() ,
m, k, V, ldv, dwork+k, c);
}
//==============================================================================
| 80f295d97c703ef145a4a16410894d812d8b9456.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarfbx.cu normal z -> c, Tue Feb 9 16:05:30 2016
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ c,
magmaFloatComplex *dwork)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_C_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_cgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
CGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c,
magmaFloatComplex *dwork, magmaFloatComplex *tau)
{
const int i = threadIdx.x;
const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
magmaFloatComplex lsum;
if (i == 0)
c[0] = MAGMA_C_ONE;
/* lsum := v**H * C */
lsum = MAGMA_C_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaFloatComplex lsum;
V += j;
lsum = MAGMA_C_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_C_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_clarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr dT, magma_int_t ldt,
magmaFloatComplex_ptr c,
magmaFloatComplex_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
magma_cgemv_kernel1
<<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_ctrmv_tkernel
<<< k, k, 0, queue->cuda_stream() >>>
( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_cgemv_kernel2
<<< blocks3, threads3, 0, queue->cuda_stream() >>>
( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
5539c3245d0d31d05e9edee12b146fe098ee4f9f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "Utilities.cuh"
#include "Bessel.cuh"
#include "cuFFT_auxiliary.cuh"
#include "NFFT2_2D.cuh"
#include "InputOutput.cuh"
hipfftHandle NFFT2_2D_GPUplan;
#define BLOCKSIZE_INTERPOLATION 256
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
#define BLOCKSIZE_BESSEL 32
#define DEBUG
#define cc 2
#define K 6
#define IDX2R(i,j,N) (((i)*(N))+(j))
#define pi_double 3.141592653589793238463
__constant__ double alpha=(2.-1./cc)*pi_double-0.01;
__constant__ int constant1_GPU;
__constant__ int constant2_GPU;
__constant__ int constant3_GPU;
__constant__ int constant4_GPU;
__constant__ int constant5_GPU;
__constant__ int constant6_GPU;
__constant__ int constant7_GPU;
__constant__ int constant8_GPU;
__constant__ int constant9_GPU;
__constant__ int constant10_GPU;
__constant__ float constant11_GPU_f;
__constant__ float constant12_GPU_f;
__constant__ double constant11_GPU;
__constant__ double constant12_GPU;
__constant__ int constant13_GPU;
__constant__ int constant14_GPU;
/**************************/
/* cuFFT PLAN CALCULATION */
/**************************/
void Calculate_cuFFT_plan_C2C_NFFT2_2D(const int N1, const int N2) { cufftSafeCall(hipfftPlan2d(&NFFT2_2D_GPUplan, cc*N1, cc*N2, HIPFFT_C2C)); }
void Calculate_cuFFT_plan_Z2Z_NFFT2_2D(const int N1, const int N2) { cufftSafeCall(hipfftPlan2d(&NFFT2_2D_GPUplan, cc*N1, cc*N2, HIPFFT_Z2Z)); }
/**************************/
/* cuFFT PLAN DESTRUCTION */
/**************************/
void Destroy_cuFFT_plan_NFFT2_2D() { cufftSafeCall(hipfftDestroy(NFFT2_2D_GPUplan)); }
/***********************************************/
/* MODIFIED BESSEL FUNCTION CALCULATION KERNEL */
/***********************************************/
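// Precomputes, for each output frequency, the reciprocal of the Kaiser-Bessel
// window transform 1/I0(K*sqrt(alpha^2 - xi^2)); Decimation_and_Scaling later
// multiplies by these factors to compensate for the convolution with the window.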
template<class T>
__global__ void Kernel_Bessel(T * __restrict__ Bessel_vector, const int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i<N) {
T xi = (static_cast<T>(2*pi_double)*(i-(N/2)))/(cc*N);
Bessel_vector[i] = static_cast<T>(1)/(bessi0(static_cast<T>(K)*sqrt(static_cast<T>(alpha*alpha)-xi*xi)));
}
}
/**************************/
/* DECIMATION AND SCALING */
/**************************/
__global__ void Decimation_and_Scaling(const float2* __restrict__ data, float2* __restrict__ result, const float* __restrict__ Bessel_vector_x, const float* __restrict__ Bessel_vector_y, const int N1, const int N2)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if((((i >= constant1_GPU) && (i < constant2_GPU)) && ((j >= constant3_GPU) && (j < constant4_GPU))))
{
float a = Bessel_vector_x[i-constant1_GPU]*Bessel_vector_y[j-constant3_GPU];
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].x=data[IDX2R(i,j,cc*N2)].x*a;
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].y=data[IDX2R(i,j,cc*N2)].y*a;
}
}
__global__ void Decimation_and_Scaling(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ Bessel_vector_x, const double* __restrict__ Bessel_vector_y, const int N1, const int N2)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if((((i >= constant1_GPU) && (i < constant2_GPU)) && ((j >= constant3_GPU) && (j < constant4_GPU))))
{
double a = Bessel_vector_x[i-constant1_GPU]*Bessel_vector_y[j-constant3_GPU];
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].x=data[IDX2R(i,j,cc*N2)].x*a;
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].y=data[IDX2R(i,j,cc*N2)].y*a;
}
}
/*****************************************************************************************/
/* KERNEL FUNCTION TO CALCULATE SERIES TERMS FOR INTERPOLATION USING DYNAMIC PARALLELISM */
/*****************************************************************************************/
__global__ void series_terms(float2 temp_data, float2* __restrict__ result, const float r_cc_points1, const float cc_diff1, const float r_cc_points2, const float cc_diff2, const int N1, const int N2)
{
int m = threadIdx.x;
int n = threadIdx.y;
float tempd, phi_cap;
float P = K*K-(cc_diff1-(m-K))*(cc_diff1-(m-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = (static_cast<float>(1./pi_double))*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap = static_cast<float>(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = static_cast<float>(alpha/pi_double);
P = K*K-(cc_diff2-(n-K))*(cc_diff2-(n-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = phi_cap*static_cast<float>(1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap = phi_cap*static_cast<float>(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = static_cast<float>(phi_cap*alpha/pi_double);
int PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),(cc*N1));
int PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),(cc*N2));
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].x,temp_data.x*phi_cap);
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].y,temp_data.y*phi_cap);
}
__global__ void series_terms(double2 temp_data, double2* __restrict__ result, const double r_cc_points1, const double cc_diff1, const double r_cc_points2, const double cc_diff2, const int N1, const int N2)
{
int m = threadIdx.x;
int n = threadIdx.y;
double tempd, phi_cap;
double P = K*K-(cc_diff1-(m-K))*(cc_diff1-(m-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = (1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap = (1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = alpha/pi_double;
P = K*K-(cc_diff2-(n-K))*(cc_diff2-(n-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = phi_cap*(1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap = phi_cap*(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = phi_cap*alpha/pi_double;
int PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),(cc*N1));
int PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),(cc*N2));
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].x,temp_data.x*phi_cap);
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].y,temp_data.y*phi_cap);
}
/************************/
/* INTERPOLATION 2D NED */
/************************/
// --- Code using dynamic parallelism
__global__ void Interpolation_NFFT2_2D_GPUKernel(const float2* __restrict__ data, float2* __restrict__ result, const float* __restrict__ x, const float* __restrict__ y, const int N1, const int N2, int M)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
float cc_points1=cc*x[i];
float r_cc_points1=rint(cc_points1); // It is the mu in Fourmont's paper
const float cc_diff1 = cc_points1-r_cc_points1;
float cc_points2=cc*y[i];
float r_cc_points2=rint(cc_points2); // It is the mu in Fourmont's paper
const float cc_diff2 = cc_points2-r_cc_points2;
# if __CUDA_ARCH__ >= 350
float2 temp_data = data[i];
dim3 dimBlock(13,13); dim3 dimGrid(1,1);
if(i<M)hipLaunchKernelGGL(( series_terms), dim3(dimGrid),dim3(dimBlock), 0, 0, temp_data,result,r_cc_points1,cc_diff1,r_cc_points2,cc_diff2,N1,N2);
# else
int PP1, PP2;
float P, tempd;
float phi_cap1, phi_cap2;
if(i<M) {
for(int m=0; m<constant9_GPU; m++) {
P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU_f*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap1 = constant11_GPU_f*((sinh(alpha/tempd))*tempd); }
else phi_cap1 = constant12_GPU_f;
for(int n=0; n<constant9_GPU; n++) {
P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
if(P<0.f) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU_f*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU_f*((sinh(alpha/tempd))*tempd); }
else phi_cap2 = phi_cap1*constant12_GPU_f;
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
}
}
}
# endif
}
// --- Code using dynamic parallelism
__global__ void Interpolation_NFFT2_2D_GPUKernel(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ x, const double* __restrict__ y, const int N1, const int N2, int M)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
double cc_points1=cc*x[i];
double r_cc_points1=rint(cc_points1); // It is the mu in Fourmont's paper
const double cc_diff1 = cc_points1-r_cc_points1;
double cc_points2=cc*y[i];
double r_cc_points2=rint(cc_points2); // It is the mu in Fourmont's paper
const double cc_diff2 = cc_points2-r_cc_points2;
# if __CUDA_ARCH__ >= 350
double2 temp_data = data[i];
dim3 dimBlock(13,13); dim3 dimGrid(1,1);
if(i<M)hipLaunchKernelGGL(( series_terms), dim3(dimGrid),dim3(dimBlock), 0, 0, temp_data,result,r_cc_points1,cc_diff1,r_cc_points2,cc_diff2,N1,N2);
# else
int PP1, PP2;
double P, tempd;
double phi_cap1, phi_cap2;
if(i<M) {
for(int m=0; m<constant9_GPU; m++) {
P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap1 = constant11_GPU*((sinh(alpha/tempd))*tempd); }
else phi_cap1 = constant12_GPU;
for(int n=0; n<constant9_GPU; n++) {
P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU*((sinh(alpha/tempd))*tempd); }
else phi_cap2 = phi_cap1*constant12_GPU;
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
}
}
}
# endif
}
// --- Code not using dynamic parallelism
//__global__ void Interpolation_NFFT2_2D_GPUKernel(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ x, const double* __restrict__ y, const int N1, const int N2, int M)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
//
// double cc_points1=cc*x[i];
// double r_cc_points1=rint(cc_points1);
// const double cc_diff1 = cc_points1-r_cc_points1;
//
// double cc_points2=cc*y[i];
// double r_cc_points2=rint(cc_points2);
// const double cc_diff2 = cc_points2-r_cc_points2;
//
// int PP1, PP2;
// double P, tempd;
//
// double phi_cap1, phi_cap2;
//
// if(i<M) {
//
// for(int m=0; m<constant9_GPU; m++) {
//
// P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
//
// PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
//
// if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU*((sin(alpha/tempd))*tempd); }
// else if(P>0.) {tempd=rsqrt(P); phi_cap1 = constant11_GPU*((sinh(alpha/tempd))*tempd); }
// else phi_cap1 = constant12_GPU;
//
// for(int n=0; n<constant9_GPU; n++) {
//
// P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
//
// PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
//
// if(P<0.) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU*((sin(alpha/tempd))*tempd); }
// else if(P>0.) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU*((sinh(alpha/tempd))*tempd); }
// else phi_cap2 = phi_cap1*constant12_GPU;
//
// atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
// atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
//
// }
// }
// }
//}
/***************************/
/* NUFFT NED 2D EVALUATION */
/***************************/
void NFFT2_2D_GPU(float2 * __restrict__ result, const float2 * __restrict__ data, const float * __restrict__ x, const float * __restrict__ y, const int N1, const int N2, const int M)
{
float alfa_CPU=static_cast<float>((2.-1./cc)*pi_double-0.01);
int constant1_CPU = (cc-1)*N1/2; gpuErrchk(hipMemcpyToSymbol(constant1_GPU, &constant1_CPU, sizeof(int)));
int constant2_CPU = (cc+1)*N1/2; gpuErrchk(hipMemcpyToSymbol(constant2_GPU, &constant2_CPU, sizeof(int)));
int constant3_CPU = (cc-1)*N2/2; gpuErrchk(hipMemcpyToSymbol(constant3_GPU, &constant3_CPU, sizeof(int)));
int constant4_CPU = (cc+1)*N2/2; gpuErrchk(hipMemcpyToSymbol(constant4_GPU, &constant4_CPU, sizeof(int)));
int constant5_CPU = (cc-1)*N1/2-N1/2; gpuErrchk(hipMemcpyToSymbol(constant5_GPU, &constant5_CPU, sizeof(int)));
int constant6_CPU = (cc-1)*N2/2-N2/2; gpuErrchk(hipMemcpyToSymbol(constant6_GPU, &constant6_CPU, sizeof(int)));
int constant7_CPU = 2.*pi_double/(cc*N1); gpuErrchk(hipMemcpyToSymbol(constant7_GPU, &constant7_CPU, sizeof(int)));
int constant8_CPU = 2.*pi_double/(cc*N2); gpuErrchk(hipMemcpyToSymbol(constant8_GPU, &constant8_CPU, sizeof(int)));
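// NOTE: constant7_CPU/constant8_CPU hold 2*pi/(cc*N) but, being declared int, they truncate to 0; neither constant appears to be read by the kernels in this file.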
int constant9_CPU = 2*K+1; gpuErrchk(hipMemcpyToSymbol(constant9_GPU, &constant9_CPU, sizeof(int)));
int constant10_CPU = K*K; gpuErrchk(hipMemcpyToSymbol(constant10_GPU, &constant10_CPU, sizeof(int)));
float constant11_CPU = static_cast<float>(1./pi_double); gpuErrchk(hipMemcpyToSymbol(constant11_GPU_f, &constant11_CPU, sizeof(float)));
float constant12_CPU = static_cast<float>(alfa_CPU/pi_double); gpuErrchk(hipMemcpyToSymbol(constant12_GPU_f, &constant12_CPU, sizeof(float)));
int constant13_CPU = cc*N1; gpuErrchk(hipMemcpyToSymbol(constant13_GPU, &constant13_CPU, sizeof(int)));
int constant14_CPU = cc*N2; gpuErrchk(hipMemcpyToSymbol(constant14_GPU, &constant14_CPU, sizeof(int)));
/* CALCULATION OF BESSEL FUNCTIONS */
float* Bessel_vector_x; gpuErrchk(hipMalloc((void **)&Bessel_vector_x,sizeof(float)*N1));
float* Bessel_vector_y; gpuErrchk(hipMalloc((void **)&Bessel_vector_y,sizeof(float)*N2));
dim3 dimBlock01(BLOCKSIZE_BESSEL,1); dim3 dimGrid01(iDivUp(N1,BLOCKSIZE_BESSEL));
hipLaunchKernelGGL(( Kernel_Bessel), dim3(dimGrid01),dim3(dimBlock01), 0, 0, Bessel_vector_x, N1);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
dim3 dimBlock02(BLOCKSIZE_BESSEL,1); dim3 dimGrid02(iDivUp(N2,BLOCKSIZE_BESSEL));
hipLaunchKernelGGL(( Kernel_Bessel), dim3(dimGrid02),dim3(dimBlock02), 0, 0, Bessel_vector_y, N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* ALLOCATIONS AND INITIALIZATIONS */
hipfftComplex *temp_result; gpuErrchk(hipMalloc((void **)&temp_result,sizeof(hipfftComplex)*cc*N1*cc*N2));
gpuErrchk(hipMemset(temp_result,0,sizeof(hipfftComplex)*cc*N1*cc*N2));
/* INTERPOLATION */
dim3 dimBlock1(BLOCKSIZE_INTERPOLATION,1); dim3 dimGrid1(iDivUp(M,BLOCKSIZE_INTERPOLATION));
# if __CUDA_ARCH__ >= 350
gpuErrchk(hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, dimGrid1.x*BLOCKSIZE_INTERPOLATION));
# endif
hipLaunchKernelGGL(( Interpolation_NFFT2_2D_GPUKernel), dim3(dimGrid1),dim3(dimBlock1), 0, 0, data,temp_result,x,y,N1,N2,M);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* FFTSHIFT 2D */
dim3 dimBlock2(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid2((cc*N1)/BLOCK_SIZE_x + ((cc*N1)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N2)/BLOCK_SIZE_y + ((cc*N2)%BLOCK_SIZE_y == 0 ? 0:1));
hipLaunchKernelGGL(( fftshift_2D), dim3(dimGrid2),dim3(dimBlock2), 0, 0, temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* FFT */
cufftSafeCall(hipfftExecC2C(NFFT2_2D_GPUplan, temp_result, temp_result, HIPFFT_FORWARD));
/* FFTSHIFT 2D */
hipLaunchKernelGGL(( fftshift_2D), dim3(dimGrid2),dim3(dimBlock2), 0, 0, temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* DECIMATION AND SCALING */
dim3 dimBlock3(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid3((cc*N2)/BLOCK_SIZE_x + ((cc*N2)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N1)/BLOCK_SIZE_y + ((cc*N1)%BLOCK_SIZE_y == 0 ? 0:1));
hipLaunchKernelGGL(( Decimation_and_Scaling), dim3(dimGrid3),dim3(dimBlock3), 0, 0, temp_result,result,Bessel_vector_x,Bessel_vector_y,N1,N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
gpuErrchk(hipFree(Bessel_vector_x));
gpuErrchk(hipFree(Bessel_vector_y));
gpuErrchk(hipFree(temp_result));
}
void NFFT2_2D_GPU(double2 * __restrict__ result, const double2 * __restrict__ data, const double * __restrict__ x, const double * __restrict__ y, const int N1, const int N2, const int M)
{
double alfa_CPU=(2.-1./cc)*pi_double-0.01;
int constant1_CPU = (cc-1)*N1/2; gpuErrchk(hipMemcpyToSymbol(constant1_GPU, &constant1_CPU, sizeof(int)));
int constant2_CPU = (cc+1)*N1/2; gpuErrchk(hipMemcpyToSymbol(constant2_GPU, &constant2_CPU, sizeof(int)));
int constant3_CPU = (cc-1)*N2/2; gpuErrchk(hipMemcpyToSymbol(constant3_GPU, &constant3_CPU, sizeof(int)));
int constant4_CPU = (cc+1)*N2/2; gpuErrchk(hipMemcpyToSymbol(constant4_GPU, &constant4_CPU, sizeof(int)));
int constant5_CPU = (cc-1)*N1/2-N1/2; gpuErrchk(hipMemcpyToSymbol(constant5_GPU, &constant5_CPU, sizeof(int)));
int constant6_CPU = (cc-1)*N2/2-N2/2; gpuErrchk(hipMemcpyToSymbol(constant6_GPU, &constant6_CPU, sizeof(int)));
int constant7_CPU = 2.*pi_double/(cc*N1); gpuErrchk(hipMemcpyToSymbol(constant7_GPU, &constant7_CPU, sizeof(int)));
int constant8_CPU = 2.*pi_double/(cc*N2); gpuErrchk(hipMemcpyToSymbol(constant8_GPU, &constant8_CPU, sizeof(int)));
int constant9_CPU = 2*K+1; gpuErrchk(hipMemcpyToSymbol(constant9_GPU, &constant9_CPU, sizeof(int)));
int constant10_CPU = K*K; gpuErrchk(hipMemcpyToSymbol(constant10_GPU, &constant10_CPU, sizeof(int)));
double constant11_CPU = 1./pi_double; gpuErrchk(hipMemcpyToSymbol(constant11_GPU, &constant11_CPU, sizeof(double)));
double constant12_CPU = alfa_CPU/pi_double; gpuErrchk(hipMemcpyToSymbol(constant12_GPU, &constant12_CPU, sizeof(double)));
int constant13_CPU = cc*N1; gpuErrchk(hipMemcpyToSymbol(constant13_GPU, &constant13_CPU, sizeof(int)));
int constant14_CPU = cc*N2; gpuErrchk(hipMemcpyToSymbol(constant14_GPU, &constant14_CPU, sizeof(int)));
/* CALCULATION OF BESSEL FUNCTIONS */
double* Bessel_vector_x; gpuErrchk(hipMalloc((void **)&Bessel_vector_x,sizeof(double)*N1));
double* Bessel_vector_y; gpuErrchk(hipMalloc((void **)&Bessel_vector_y,sizeof(double)*N2));
dim3 dimBlock01(BLOCKSIZE_BESSEL,1); dim3 dimGrid01(iDivUp(N1,BLOCKSIZE_BESSEL));
hipLaunchKernelGGL(( Kernel_Bessel), dim3(dimGrid01),dim3(dimBlock01), 0, 0, Bessel_vector_x, N1);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
dim3 dimBlock02(BLOCKSIZE_BESSEL,1); dim3 dimGrid02(iDivUp(N2,BLOCKSIZE_BESSEL));
hipLaunchKernelGGL(( Kernel_Bessel), dim3(dimGrid02),dim3(dimBlock02), 0, 0, Bessel_vector_y, N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* ALLOCATIONS AND INITIALIZATIONS */
hipfftDoubleComplex *temp_result; gpuErrchk(hipMalloc((void **)&temp_result,sizeof(hipfftDoubleComplex)*cc*N1*cc*N2));
gpuErrchk(hipMemset(temp_result,0,sizeof(hipfftDoubleComplex)*cc*N1*cc*N2));
/* INTERPOLATION */
dim3 dimBlock1(BLOCKSIZE_INTERPOLATION,1); dim3 dimGrid1(iDivUp(M,BLOCKSIZE_INTERPOLATION));
# if __CUDA_ARCH__ >= 350
gpuErrchk(hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, dimGrid1.x*BLOCKSIZE_INTERPOLATION));
# endif
hipLaunchKernelGGL(( Interpolation_NFFT2_2D_GPUKernel), dim3(dimGrid1),dim3(dimBlock1), 0, 0, data,temp_result,x,y,N1,N2,M);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* FFTSHIFT 2D */
dim3 dimBlock2(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid2((cc*N1)/BLOCK_SIZE_x + ((cc*N1)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N2)/BLOCK_SIZE_y + ((cc*N2)%BLOCK_SIZE_y == 0 ? 0:1));
hipLaunchKernelGGL(( fftshift_2D), dim3(dimGrid2),dim3(dimBlock2), 0, 0, temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* FFT */
cufftSafeCall(hipfftExecZ2Z(NFFT2_2D_GPUplan, temp_result, temp_result, HIPFFT_FORWARD));
/* FFTSHIFT 2D */
hipLaunchKernelGGL(( fftshift_2D), dim3(dimGrid2),dim3(dimBlock2), 0, 0, temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
/* DECIMATION AND SCALING */
dim3 dimBlock3(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid3((cc*N2)/BLOCK_SIZE_x + ((cc*N2)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N1)/BLOCK_SIZE_y + ((cc*N1)%BLOCK_SIZE_y == 0 ? 0:1));
hipLaunchKernelGGL(( Decimation_and_Scaling), dim3(dimGrid3),dim3(dimBlock3), 0, 0, temp_result,result,Bessel_vector_x,Bessel_vector_y,N1,N2);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
gpuErrchk(hipFree(Bessel_vector_x));
gpuErrchk(hipFree(Bessel_vector_y));
gpuErrchk(hipFree(temp_result));
}
| 5539c3245d0d31d05e9edee12b146fe098ee4f9f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include "Utilities.cuh"
#include "Bessel.cuh"
#include "cuFFT_auxiliary.cuh"
#include "NFFT2_2D.cuh"
#include "InputOutput.cuh"
cufftHandle NFFT2_2D_GPUplan;
#define BLOCKSIZE_INTERPOLATION 256
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
#define BLOCKSIZE_BESSEL 32
#define DEBUG
#define cc 2
#define K 6
#define IDX2R(i,j,N) (((i)*(N))+(j))
#define pi_double 3.141592653589793238463
__constant__ double alpha=(2.-1./cc)*pi_double-0.01;
__constant__ int constant1_GPU;
__constant__ int constant2_GPU;
__constant__ int constant3_GPU;
__constant__ int constant4_GPU;
__constant__ int constant5_GPU;
__constant__ int constant6_GPU;
__constant__ int constant7_GPU;
__constant__ int constant8_GPU;
__constant__ int constant9_GPU;
__constant__ int constant10_GPU;
__constant__ float constant11_GPU_f;
__constant__ float constant12_GPU_f;
__constant__ double constant11_GPU;
__constant__ double constant12_GPU;
__constant__ int constant13_GPU;
__constant__ int constant14_GPU;
/**************************/
/* cuFFT PLAN CALCULATION */
/**************************/
void Calculate_cuFFT_plan_C2C_NFFT2_2D(const int N1, const int N2) { cufftSafeCall(cufftPlan2d(&NFFT2_2D_GPUplan, cc*N1, cc*N2, CUFFT_C2C)); }
void Calculate_cuFFT_plan_Z2Z_NFFT2_2D(const int N1, const int N2) { cufftSafeCall(cufftPlan2d(&NFFT2_2D_GPUplan, cc*N1, cc*N2, CUFFT_Z2Z)); }
/**************************/
/* cuFFT PLAN DESTRUCTION */
/**************************/
void Destroy_cuFFT_plan_NFFT2_2D() { cufftSafeCall(cufftDestroy(NFFT2_2D_GPUplan)); }
/***********************************************/
/* MODIFIED BESSEL FUNCTION CALCULATION KERNEL */
/***********************************************/
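// Precomputes, per output index, the reciprocal window correction 1/I0(K*sqrt(alpha^2 - xi^2));
// Decimation_and_Scaling multiplies the decimated FFT result by these factors (deapodization).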
template<class T>
__global__ void Kernel_Bessel(T * __restrict__ Bessel_vector, const int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i<N) {
T xi = (static_cast<T>(2*pi_double)*(i-(N/2)))/(cc*N);
Bessel_vector[i] = static_cast<T>(1)/(bessi0(static_cast<T>(K)*sqrt(static_cast<T>(alpha*alpha)-xi*xi)));
}
}
/**************************/
/* DECIMATION AND SCALING */
/**************************/
__global__ void Decimation_and_Scaling(const float2* __restrict__ data, float2* __restrict__ result, const float* __restrict__ Bessel_vector_x, const float* __restrict__ Bessel_vector_y, const int N1, const int N2)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if((((i >= constant1_GPU) && (i < constant2_GPU)) && ((j >= constant3_GPU) && (j < constant4_GPU))))
{
float a = Bessel_vector_x[i-constant1_GPU]*Bessel_vector_y[j-constant3_GPU];
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].x=data[IDX2R(i,j,cc*N2)].x*a;
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].y=data[IDX2R(i,j,cc*N2)].y*a;
}
}
__global__ void Decimation_and_Scaling(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ Bessel_vector_x, const double* __restrict__ Bessel_vector_y, const int N1, const int N2)
{
int i = threadIdx.y + blockDim.y * blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
if((((i >= constant1_GPU) && (i < constant2_GPU)) && ((j >= constant3_GPU) && (j < constant4_GPU))))
{
double a = Bessel_vector_x[i-constant1_GPU]*Bessel_vector_y[j-constant3_GPU];
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].x=data[IDX2R(i,j,cc*N2)].x*a;
result[IDX2R(i-constant1_GPU,j-constant3_GPU,N2)].y=data[IDX2R(i,j,cc*N2)].y*a;
}
}
/*****************************************************************************************/
/* KERNEL FUNCTION TO CALCULATE SERIES TERMS FOR INTERPOLATION USING DYNAMIC PARALLELISM */
/*****************************************************************************************/
__global__ void series_terms(float2 temp_data, float2* __restrict__ result, const float r_cc_points1, const float cc_diff1, const float r_cc_points2, const float cc_diff2, const int N1, const int N2)
{
int m = threadIdx.x;
int n = threadIdx.y;
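// Child kernel of the dynamic-parallelism path: one thread per (m,n) term of the (2K+1)x(2K+1) window,
// launched by the interpolation kernel as a single 13x13 block per source point.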
float tempd, phi_cap;
float P = K*K-(cc_diff1-(m-K))*(cc_diff1-(m-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = (static_cast<float>(1./pi_double))*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap = static_cast<float>(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = static_cast<float>(alpha/pi_double);
P = K*K-(cc_diff2-(n-K))*(cc_diff2-(n-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = phi_cap*static_cast<float>(1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap = phi_cap*static_cast<float>(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = static_cast<float>(phi_cap*alpha/pi_double);
int PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),(cc*N1));
int PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),(cc*N2));
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].x,temp_data.x*phi_cap);
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].y,temp_data.y*phi_cap);
}
__global__ void series_terms(double2 temp_data, double2* __restrict__ result, const double r_cc_points1, const double cc_diff1, const double r_cc_points2, const double cc_diff2, const int N1, const int N2)
{
int m = threadIdx.x;
int n = threadIdx.y;
double tempd, phi_cap;
double P = K*K-(cc_diff1-(m-K))*(cc_diff1-(m-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = (1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap = (1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = alpha/pi_double;
P = K*K-(cc_diff2-(n-K))*(cc_diff2-(n-K));
if(P<0.) {tempd=rsqrt(-P); phi_cap = phi_cap*(1./pi_double)*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap = phi_cap*(1./pi_double)*((sinh(alpha/tempd))*tempd); }
else phi_cap = phi_cap*alpha/pi_double;
int PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),(cc*N1));
int PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),(cc*N2));
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].x,temp_data.x*phi_cap);
atomicAdd(&result[IDX2R(PP1,PP2,cc*N2)].y,temp_data.y*phi_cap);
}
/************************/
/* INTERPOLATION 2D NED */
/************************/
// --- Code using dynamic parallelism
__global__ void Interpolation_NFFT2_2D_GPUKernel(const float2* __restrict__ data, float2* __restrict__ result, const float* __restrict__ x, const float* __restrict__ y, const int N1, const int N2, int M)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= M) return; // early exit: threads beyond M must not read x[i], y[i] or data[i] out of bounds
float cc_points1=cc*x[i];
float r_cc_points1=rint(cc_points1); // It is the mu in Fourmont's paper
const float cc_diff1 = cc_points1-r_cc_points1;
float cc_points2=cc*y[i];
float r_cc_points2=rint(cc_points2); // It is the mu in Fourmont's paper
const float cc_diff2 = cc_points2-r_cc_points2;
# if __CUDA_ARCH__ >= 350
float2 temp_data = data[i];
dim3 dimBlock(13,13); dim3 dimGrid(1,1);
if(i<M) series_terms<<<dimGrid,dimBlock>>>(temp_data,result,r_cc_points1,cc_diff1,r_cc_points2,cc_diff2,N1,N2);
# else
int PP1, PP2;
float P, tempd;
float phi_cap1, phi_cap2;
if(i<M) {
for(int m=0; m<constant9_GPU; m++) {
P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU_f*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap1 = constant11_GPU_f*((sinh(alpha/tempd))*tempd); }
else phi_cap1 = constant12_GPU_f;
for(int n=0; n<constant9_GPU; n++) {
P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
if(P<0.f) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU_f*((sin(alpha/tempd))*tempd); }
else if(P>0.f) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU_f*((sinh(alpha/tempd))*tempd); }
else phi_cap2 = phi_cap1*constant12_GPU_f;
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
}
}
}
# endif
}
// --- Code using dynamic parallelism
__global__ void Interpolation_NFFT2_2D_GPUKernel(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ x, const double* __restrict__ y, const int N1, const int N2, int M)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= M) return; // early exit: threads beyond M must not read x[i], y[i] or data[i] out of bounds
double cc_points1=cc*x[i];
double r_cc_points1=rint(cc_points1); // It is the mu in Fourmont's paper
const double cc_diff1 = cc_points1-r_cc_points1;
double cc_points2=cc*y[i];
double r_cc_points2=rint(cc_points2); // It is the mu in Fourmont's paper
const double cc_diff2 = cc_points2-r_cc_points2;
# if __CUDA_ARCH__ >= 350
double2 temp_data = data[i];
dim3 dimBlock(13,13); dim3 dimGrid(1,1);
if(i<M) series_terms<<<dimGrid,dimBlock>>>(temp_data,result,r_cc_points1,cc_diff1,r_cc_points2,cc_diff2,N1,N2);
# else
int PP1, PP2;
double P, tempd;
double phi_cap1, phi_cap2;
if(i<M) {
for(int m=0; m<constant9_GPU; m++) {
P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap1 = constant11_GPU*((sinh(alpha/tempd))*tempd); }
else phi_cap1 = constant12_GPU;
for(int n=0; n<constant9_GPU; n++) {
P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
if(P<0.) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU*((sin(alpha/tempd))*tempd); }
else if(P>0.) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU*((sinh(alpha/tempd))*tempd); }
else phi_cap2 = phi_cap1*constant12_GPU;
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
}
}
}
# endif
}
// --- Code not using dynamic parallelism
//__global__ void Interpolation_NFFT2_2D_GPUKernel(const double2* __restrict__ data, double2* __restrict__ result, const double* __restrict__ x, const double* __restrict__ y, const int N1, const int N2, int M)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
//
// double cc_points1=cc*x[i];
// double r_cc_points1=rint(cc_points1);
// const double cc_diff1 = cc_points1-r_cc_points1;
//
// double cc_points2=cc*y[i];
// double r_cc_points2=rint(cc_points2);
// const double cc_diff2 = cc_points2-r_cc_points2;
//
// int PP1, PP2;
// double P, tempd;
//
// double phi_cap1, phi_cap2;
//
// if(i<M) {
//
// for(int m=0; m<constant9_GPU; m++) {
//
// P = constant10_GPU-(cc_diff1-(m-K))*(cc_diff1-(m-K));
//
// PP1 = modulo((r_cc_points1+(m-K)+N1*cc/2),constant13_GPU);
//
// if(P<0.) {tempd=rsqrt(-P); phi_cap1 = constant11_GPU*((sin(alpha/tempd))*tempd); }
// else if(P>0.) {tempd=rsqrt(P); phi_cap1 = constant11_GPU*((sinh(alpha/tempd))*tempd); }
// else phi_cap1 = constant12_GPU;
//
// for(int n=0; n<constant9_GPU; n++) {
//
// P = constant10_GPU-(cc_diff2-(n-K))*(cc_diff2-(n-K));
//
// PP2 = modulo((r_cc_points2+(n-K)+N2*cc/2),constant14_GPU);
//
// if(P<0.) {tempd=rsqrt(-P); phi_cap2 = phi_cap1*constant11_GPU*((sin(alpha/tempd))*tempd); }
// else if(P>0.) {tempd=rsqrt(P); phi_cap2 = phi_cap1*constant11_GPU*((sinh(alpha/tempd))*tempd); }
// else phi_cap2 = phi_cap1*constant12_GPU;
//
// atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].x,data[i].x*phi_cap2);
// atomicAdd(&result[IDX2R(PP1,PP2,constant14_GPU)].y,data[i].y*phi_cap2);
//
// }
// }
// }
//}
/***************************/
/* NUFFT NED 2D EVALUATION */
/***************************/
void NFFT2_2D_GPU(float2 * __restrict__ result, const float2 * __restrict__ data, const float * __restrict__ x, const float * __restrict__ y, const int N1, const int N2, const int M)
{
float alfa_CPU=static_cast<float>((2.-1./cc)*pi_double-0.01);
int constant1_CPU = (cc-1)*N1/2; gpuErrchk(cudaMemcpyToSymbol(constant1_GPU, &constant1_CPU, sizeof(int)));
int constant2_CPU = (cc+1)*N1/2; gpuErrchk(cudaMemcpyToSymbol(constant2_GPU, &constant2_CPU, sizeof(int)));
int constant3_CPU = (cc-1)*N2/2; gpuErrchk(cudaMemcpyToSymbol(constant3_GPU, &constant3_CPU, sizeof(int)));
int constant4_CPU = (cc+1)*N2/2; gpuErrchk(cudaMemcpyToSymbol(constant4_GPU, &constant4_CPU, sizeof(int)));
int constant5_CPU = (cc-1)*N1/2-N1/2; gpuErrchk(cudaMemcpyToSymbol(constant5_GPU, &constant5_CPU, sizeof(int)));
int constant6_CPU = (cc-1)*N2/2-N2/2; gpuErrchk(cudaMemcpyToSymbol(constant6_GPU, &constant6_CPU, sizeof(int)));
int constant7_CPU = 2.*pi_double/(cc*N1); gpuErrchk(cudaMemcpyToSymbol(constant7_GPU, &constant7_CPU, sizeof(int)));
int constant8_CPU = 2.*pi_double/(cc*N2); gpuErrchk(cudaMemcpyToSymbol(constant8_GPU, &constant8_CPU, sizeof(int)));
int constant9_CPU = 2*K+1; gpuErrchk(cudaMemcpyToSymbol(constant9_GPU, &constant9_CPU, sizeof(int)));
int constant10_CPU = K*K; gpuErrchk(cudaMemcpyToSymbol(constant10_GPU, &constant10_CPU, sizeof(int)));
float constant11_CPU = static_cast<float>(1./pi_double); gpuErrchk(cudaMemcpyToSymbol(constant11_GPU_f, &constant11_CPU, sizeof(float)));
float constant12_CPU = static_cast<float>(alfa_CPU/pi_double); gpuErrchk(cudaMemcpyToSymbol(constant12_GPU_f, &constant12_CPU, sizeof(float)));
int constant13_CPU = cc*N1; gpuErrchk(cudaMemcpyToSymbol(constant13_GPU, &constant13_CPU, sizeof(int)));
int constant14_CPU = cc*N2; gpuErrchk(cudaMemcpyToSymbol(constant14_GPU, &constant14_CPU, sizeof(int)));
/* CALCULATION OF BESSEL FUNCTIONS */
float* Bessel_vector_x; gpuErrchk(cudaMalloc((void **)&Bessel_vector_x,sizeof(float)*N1));
float* Bessel_vector_y; gpuErrchk(cudaMalloc((void **)&Bessel_vector_y,sizeof(float)*N2));
dim3 dimBlock01(BLOCKSIZE_BESSEL,1); dim3 dimGrid01(iDivUp(N1,BLOCKSIZE_BESSEL));
Kernel_Bessel<<<dimGrid01,dimBlock01>>>(Bessel_vector_x, N1);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
dim3 dimBlock02(BLOCKSIZE_BESSEL,1); dim3 dimGrid02(iDivUp(N2,BLOCKSIZE_BESSEL));
Kernel_Bessel<<<dimGrid02,dimBlock02>>>(Bessel_vector_y, N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* ALLOCATIONS AND INITIALIZATIONS */
cufftComplex *temp_result; gpuErrchk(cudaMalloc((void **)&temp_result,sizeof(cufftComplex)*cc*N1*cc*N2));
gpuErrchk(cudaMemset(temp_result,0,sizeof(cufftComplex)*cc*N1*cc*N2));
/* INTERPOLATION */
dim3 dimBlock1(BLOCKSIZE_INTERPOLATION,1); dim3 dimGrid1(iDivUp(M,BLOCKSIZE_INTERPOLATION));
# if __CUDA_ARCH__ >= 350
gpuErrchk(cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, dimGrid1.x*BLOCKSIZE_INTERPOLATION));
# endif
Interpolation_NFFT2_2D_GPUKernel<<<dimGrid1,dimBlock1>>>(data,temp_result,x,y,N1,N2,M);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* FFTSHIFT 2D */
dim3 dimBlock2(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid2((cc*N1)/BLOCK_SIZE_x + ((cc*N1)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N2)/BLOCK_SIZE_y + ((cc*N2)%BLOCK_SIZE_y == 0 ? 0:1));
fftshift_2D<<<dimGrid2,dimBlock2>>>(temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* FFT */
cufftSafeCall(cufftExecC2C(NFFT2_2D_GPUplan, temp_result, temp_result, CUFFT_FORWARD));
/* FFTSHIFT 2D */
fftshift_2D<<<dimGrid2,dimBlock2>>>(temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* DECIMATION AND SCALING */
dim3 dimBlock3(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid3((cc*N2)/BLOCK_SIZE_x + ((cc*N2)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N1)/BLOCK_SIZE_y + ((cc*N1)%BLOCK_SIZE_y == 0 ? 0:1));
Decimation_and_Scaling<<<dimGrid3,dimBlock3>>>(temp_result,result,Bessel_vector_x,Bessel_vector_y,N1,N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
gpuErrchk(cudaFree(Bessel_vector_x));
gpuErrchk(cudaFree(Bessel_vector_y));
gpuErrchk(cudaFree(temp_result));
}
void NFFT2_2D_GPU(double2 * __restrict__ result, const double2 * __restrict__ data, const double * __restrict__ x, const double * __restrict__ y, const int N1, const int N2, const int M)
{
double alfa_CPU=(2.-1./cc)*pi_double-0.01;
int constant1_CPU = (cc-1)*N1/2; gpuErrchk(cudaMemcpyToSymbol(constant1_GPU, &constant1_CPU, sizeof(int)));
int constant2_CPU = (cc+1)*N1/2; gpuErrchk(cudaMemcpyToSymbol(constant2_GPU, &constant2_CPU, sizeof(int)));
int constant3_CPU = (cc-1)*N2/2; gpuErrchk(cudaMemcpyToSymbol(constant3_GPU, &constant3_CPU, sizeof(int)));
int constant4_CPU = (cc+1)*N2/2; gpuErrchk(cudaMemcpyToSymbol(constant4_GPU, &constant4_CPU, sizeof(int)));
int constant5_CPU = (cc-1)*N1/2-N1/2; gpuErrchk(cudaMemcpyToSymbol(constant5_GPU, &constant5_CPU, sizeof(int)));
int constant6_CPU = (cc-1)*N2/2-N2/2; gpuErrchk(cudaMemcpyToSymbol(constant6_GPU, &constant6_CPU, sizeof(int)));
int constant7_CPU = 2.*pi_double/(cc*N1); gpuErrchk(cudaMemcpyToSymbol(constant7_GPU, &constant7_CPU, sizeof(int)));
int constant8_CPU = 2.*pi_double/(cc*N2); gpuErrchk(cudaMemcpyToSymbol(constant8_GPU, &constant8_CPU, sizeof(int)));
int constant9_CPU = 2*K+1; gpuErrchk(cudaMemcpyToSymbol(constant9_GPU, &constant9_CPU, sizeof(int)));
int constant10_CPU = K*K; gpuErrchk(cudaMemcpyToSymbol(constant10_GPU, &constant10_CPU, sizeof(int)));
double constant11_CPU = 1./pi_double; gpuErrchk(cudaMemcpyToSymbol(constant11_GPU, &constant11_CPU, sizeof(double)));
double constant12_CPU = alfa_CPU/pi_double; gpuErrchk(cudaMemcpyToSymbol(constant12_GPU, &constant12_CPU, sizeof(double)));
int constant13_CPU = cc*N1; gpuErrchk(cudaMemcpyToSymbol(constant13_GPU, &constant13_CPU, sizeof(int)));
int constant14_CPU = cc*N2; gpuErrchk(cudaMemcpyToSymbol(constant14_GPU, &constant14_CPU, sizeof(int)));
/* CALCULATION OF BESSEL FUNCTIONS */
double* Bessel_vector_x; gpuErrchk(cudaMalloc((void **)&Bessel_vector_x,sizeof(double)*N1));
double* Bessel_vector_y; gpuErrchk(cudaMalloc((void **)&Bessel_vector_y,sizeof(double)*N2));
dim3 dimBlock01(BLOCKSIZE_BESSEL,1); dim3 dimGrid01(iDivUp(N1,BLOCKSIZE_BESSEL));
Kernel_Bessel<<<dimGrid01,dimBlock01>>>(Bessel_vector_x, N1);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
dim3 dimBlock02(BLOCKSIZE_BESSEL,1); dim3 dimGrid02(iDivUp(N2,BLOCKSIZE_BESSEL));
Kernel_Bessel<<<dimGrid02,dimBlock02>>>(Bessel_vector_y, N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* ALLOCATIONS AND INITIALIZATIONS */
cufftDoubleComplex *temp_result; gpuErrchk(cudaMalloc((void **)&temp_result,sizeof(cufftDoubleComplex)*cc*N1*cc*N2));
gpuErrchk(cudaMemset(temp_result,0,sizeof(cufftDoubleComplex)*cc*N1*cc*N2));
/* INTERPOLATION */
dim3 dimBlock1(BLOCKSIZE_INTERPOLATION,1); dim3 dimGrid1(iDivUp(M,BLOCKSIZE_INTERPOLATION));
# if __CUDA_ARCH__ >= 350
gpuErrchk(cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, dimGrid1.x*BLOCKSIZE_INTERPOLATION));
# endif
Interpolation_NFFT2_2D_GPUKernel<<<dimGrid1,dimBlock1>>>(data,temp_result,x,y,N1,N2,M);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* FFTSHIFT 2D */
dim3 dimBlock2(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid2((cc*N1)/BLOCK_SIZE_x + ((cc*N1)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N2)/BLOCK_SIZE_y + ((cc*N2)%BLOCK_SIZE_y == 0 ? 0:1));
fftshift_2D<<<dimGrid2,dimBlock2>>>(temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* FFT */
cufftSafeCall(cufftExecZ2Z(NFFT2_2D_GPUplan, temp_result, temp_result, CUFFT_FORWARD));
/* FFTSHIFT 2D */
fftshift_2D<<<dimGrid2,dimBlock2>>>(temp_result,cc*N1,cc*N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
/* DECIMATION AND SCALING */
dim3 dimBlock3(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid3((cc*N2)/BLOCK_SIZE_x + ((cc*N2)%BLOCK_SIZE_x == 0 ? 0:1),(cc*N1)/BLOCK_SIZE_y + ((cc*N1)%BLOCK_SIZE_y == 0 ? 0:1));
Decimation_and_Scaling<<<dimGrid3,dimBlock3>>>(temp_result,result,Bessel_vector_x,Bessel_vector_y,N1,N2);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
gpuErrchk(cudaFree(Bessel_vector_x));
gpuErrchk(cudaFree(Bessel_vector_y));
gpuErrchk(cudaFree(temp_result));
}
|
a4c852379e930fdcdda2c6370ea0de108bdcb67c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void kernel_Err(float *PSF, float *Data, float *Error, int Nfit, int PSFSize){
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float sse = 0;
float LLR = 0;
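	// sse accumulates the sum of squared residuals between the model (PSF) and the data;
	// LLR accumulates twice the Poisson log-likelihood ratio. Note that the Data*log(Data) term is NaN for pixels that are exactly zero.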
for (int i = 0; i < PSFSize; i++)
{
sse += pow(PSF[j*PSFSize + i] - Data[j*PSFSize + i], 2);
LLR += 2 * (PSF[j*PSFSize + i] - Data[j*PSFSize + i] - Data[j*PSFSize + i] * log(PSF[j*PSFSize + i]) + Data[j*PSFSize + i] * log(Data[j*PSFSize + i]));
//mexPrintf("%.05f \n",toppsf[k*psfsize+i]);
}
Error[j * 2] += sse;
Error[j * 2 + 1] += LLR;
}
| a4c852379e930fdcdda2c6370ea0de108bdcb67c.cu | #include "cuda_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void kernel_Err(float *PSF, float *Data, float *Error, int Nfit, int PSFSize){
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float sse = 0;
float LLR = 0;
for (int i = 0; i < PSFSize; i++)
{
sse += pow(PSF[j*PSFSize + i] - Data[j*PSFSize + i], 2);
LLR += 2 * (PSF[j*PSFSize + i] - Data[j*PSFSize + i] - Data[j*PSFSize + i] * log(PSF[j*PSFSize + i]) + Data[j*PSFSize + i] * log(Data[j*PSFSize + i]));
//mexPrintf("%.05f \n",toppsf[k*psfsize+i]);
}
Error[j * 2] += sse;
Error[j * 2 + 1] += LLR;
}
|
8442a32b9d4e92b68e0fa434bd29741225367713.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void UdpateEnergyTerm_movement( float* energy, int energy_dim, int nPatches, float * desc, int desc_dim, int id_desc_move) // which descriptor index is the one with movement
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int idDim = id % energy_dim;
int idPatch = id / energy_dim;
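	// energy is laid out as [patch][term] (term index = id % energy_dim); term 1 is the movement term,
	// set to the negative of the selected descriptor entry of that patch.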
if (id<energy_dim*nPatches){
if (idDim==1) // movement
energy[id] = -desc[idPatch*desc_dim + id_desc_move];
}
} | 8442a32b9d4e92b68e0fa434bd29741225367713.cu | #include "includes.h"
__global__ void UdpateEnergyTerm_movement( float* energy, int energy_dim, int nPatches, float * desc, int desc_dim, int id_desc_move) // which descriptor index is the one with movement
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int idDim = id % energy_dim;
int idPatch = id / energy_dim;
if (id<energy_dim*nPatches){
if (idDim==1) // movement
energy[id] = -desc[idPatch*desc_dim + id_desc_move];
}
} |
5e799b9bd32cb4c748c9219ed1a32bfd023aff2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "ecs.h"
#include "Kernels.h"
#include "mergeAndSortBlocks.cuh"
#include "CudaHelpers.h"
#include <algorithm>
#include <cstdio>
#include <cstddef>
#include <cstdint>
#include <typeinfo>
#include <chrono>
#include <memory>
#include <iostream>
#include <iomanip>
/// Generic launcher for use with e.g. lambdas.
/// For every track it invokes the functor with:
/// - a reference to the track
/// - the task index
/// - the block index
/// - the track index within the block
/// - the global track index within the task
template<typename Slab, typename Func_t>
__global__ void launcher(Slab* slab, Func_t func, unsigned int loadFactor = 1)
{
for (unsigned int extraRuns = 0; extraRuns < loadFactor; ++extraRuns) {
for (unsigned int taskIdx = 0; taskIdx < slab->tasks_per_slab; ++taskIdx) {
for (unsigned int globalTrackIdx_inTask = blockIdx.x * blockDim.x + threadIdx.x;
globalTrackIdx_inTask < Slab::tracks_per_block * Slab::blocks_per_task;
globalTrackIdx_inTask += gridDim.x * blockDim.x) {
const auto tIdx = globalTrackIdx_inTask % Slab::tracks_per_block;
const auto bIdx = globalTrackIdx_inTask / Slab::tracks_per_block;
assert(tIdx < Slab::tracks_per_block);
assert(bIdx < Slab::blocks_per_task);
func(slab->tracks[taskIdx][bIdx][tIdx], taskIdx, bIdx, tIdx, globalTrackIdx_inTask);
}
}
}
}
/// A timing helper that runs the given callable, synchronises the device, and prints the elapsed time.
void time(std::string what, std::string after, std::function<void()> func) {
std::cout << std::setw(30) << std::left << what << std::flush;
const auto start = std::chrono::high_resolution_clock::now();
func();
checkCuda( hipDeviceSynchronize() );
const auto stop = std::chrono::high_resolution_clock::now();
std::cout << "\t" << std::right << std::fixed << std::setprecision(3) << std::setw(12)
<< std::chrono::duration<double, std::milli>(stop - start).count() << " ms."
<< after << std::flush;
}
template<typename Slab>
__global__
void checkGlobalOccupancy(Slab* slab, unsigned int* storeOccupancy, unsigned int* referenceOccupancy = nullptr) {
unsigned int occup = 0;
for (auto& task : slab->tracks) {
for (auto& block : task) {
occup += occupancy<Slab::tracks_per_block, 1>(&block);
}
}
if (referenceOccupancy) {
assert(*referenceOccupancy == occup);
if (*referenceOccupancy != occup) {
printf("Error: Occupancy is wrong: last=%d now=%d\n", *referenceOccupancy, occup);
}
}
*storeOccupancy = occup;
}
int main() {
constexpr unsigned int loadMultiplier = 100;
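  // Repetition count for the heavier timing runs: most benchmarks are timed once and then loadMultiplier times.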
#define MEASURE_COMPACTING_TIME false
#ifdef NDEBUG
constexpr unsigned int nBlock = SlabSoA::blocks_per_task;
constexpr unsigned int nThread = SlabSoA::tracks_per_block;
#else
constexpr unsigned int nBlock = 2;
constexpr unsigned int nThread = 64;
#endif
// First initialisation, so it doesn't affect timings
hipDeviceSynchronize();
std::unique_ptr<SlabSoA> slabECS;
std::unique_ptr<SlabAoS> slabAoS;
time("ECS construct slabs", "\n", [&](){
slabECS = std::make_unique<SlabSoA>();
});
time("AoS construct slabs", "\n", [&](){
slabAoS = std::make_unique<SlabAoS>();
});
std::unique_ptr<SlabSoA, CudaDeleter> GPUMemECS;
std::unique_ptr<SlabSoA, CudaDeleter> GPUMemECS2;
std::unique_ptr<SlabAoS, CudaDeleter> GPUMemAoS;
std::unique_ptr<SlabAoS, CudaDeleter> GPUMemAoS2;
time("cuda malloc+memcpy", "\n", [&](){
GPUMemECS = make_unique_cuda<SlabSoA>();
GPUMemECS2 = make_unique_cuda<SlabSoA>();
checkCuda( hipMemcpy(GPUMemECS.get(), slabECS.get(), sizeof(SlabSoA), hipMemcpyDefault) );
GPUMemAoS = make_unique_cuda<SlabAoS>();
GPUMemAoS2 = make_unique_cuda<SlabAoS>();
checkCuda( hipMemcpy(GPUMemAoS.get(), slabAoS.get(), sizeof(SlabAoS), hipMemcpyDefault) );
});
// Give all particles an id
// ------------------------------------
time("ECS enumerate_particles", "\t", [&](){
host_enumerate_particles(slabECS.get());
});
time(" GPU", "\t", [&](){
hipLaunchKernelGGL(( run_enumerate_particles), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), 1);
});
hipLaunchKernelGGL(( checkEnumeration), dim3(1), dim3(1), 0, 0, GPUMemECS.get());
checkCuda( hipDeviceSynchronize() );
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (lambda, run " + std::to_string(nRuns) + "x)", "\t", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
const unsigned int particleID = taskIdx * SlabSoA::blocks_per_task * SlabSoA::tracks_per_block
+ globalTIdx;
enumerate_particles(track, particleID);
},
nRuns);
});
}
std::cout << std::endl;
hipLaunchKernelGGL(( checkEnumeration), dim3(1), dim3(1), 0, 0, GPUMemECS.get());
checkCuda( hipDeviceSynchronize() );
time("AoS enumerate_particles", "\t", [&](){
host_enumerate_particles(slabAoS.get());
});
time(" GPU", "\t", [&](){
hipLaunchKernelGGL(( run_enumerate_particles), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get());
});
hipLaunchKernelGGL(( checkEnumeration), dim3(1), dim3(1), 0, 0, GPUMemAoS.get());
checkCuda( hipDeviceSynchronize() );
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (lambda, run " + std::to_string(nRuns) + "x)", "\t", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
const unsigned int particleID = taskIdx * SlabSoA::blocks_per_task * SlabSoA::tracks_per_block
+ globalTIdx;
enumerate_particles(track, particleID);
},
nRuns);
});
}
std::cout << std::endl;
hipLaunchKernelGGL(( checkEnumeration), dim3(1), dim3(1), 0, 0, GPUMemAoS.get());
checkCuda( hipDeviceSynchronize() );
// Seed all rngs on GPU
// --------------------
time("ECS seed rng", "\n", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
seed_rng(track);
});
});
time("AoS seed rng", "\n", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
seed_rng(track);
});
});
// Initialise position, momentum and energy
// ----------------------------------------
time("ECS compute_energy", "\t", [&](){
host_compute_energy(slabECS.get());
});
time(" GPU (run 1x, invoke as kernel)", "\t", [&](){
hipLaunchKernelGGL(( run_compute_energy), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x, invoke as lambda)", "\t", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
init_pos_mom(track);
compute_energy(track);
},
nRuns);
});
}
std::cout << std::endl;
time("AoS compute_energy", "\t", [&](){
host_compute_energy(slabAoS.get());
});
time(" GPU (run 1x, invoke as kernel)", "\t", [&](){
hipLaunchKernelGGL(( run_compute_energy), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x, invoke as lambda)", "\t", [&](){
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), [=]__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
init_pos_mom(track);
compute_energy(track);
},
nRuns);
});
}
std::cout << std::endl;
// Print some particles
// ------------------------
std::cout << "Particles from ECS, CPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, CPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
time("Memcpy back", "\n", [&](){
checkCuda( hipMemcpy(slabECS.get(), GPUMemECS.get(), sizeof(SlabSoA), hipMemcpyDefault) );
checkCuda( hipMemcpy(slabAoS.get(), GPUMemAoS.get(), sizeof(SlabAoS), hipMemcpyDefault) );
});
std::cout << "Particles from ECS, GPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, GPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
// Save current state, so we can run compactification multiple times
time("Memcpy GPU to GPU", "\n", [&](){
checkCuda( hipMemcpy(GPUMemECS2.get(), GPUMemECS.get(), sizeof(SlabSoA), hipMemcpyDefault) );
checkCuda( hipMemcpy(GPUMemAoS2.get(), GPUMemAoS.get(), sizeof(SlabAoS), hipMemcpyDefault) );
});
// Advance particle by momentum vector with random magnitude
// ---------------------------------------------------------
// As a second step, advance the particles and kill them randomly
constexpr float survivalProbability = 0.97f;
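  // Each advance-and-kill pass randomly removes about 3% of the tracks, creating holes that the later
  // run_merge_blocks calls compact away.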
unsigned int* occup;
hipMallocManaged(&occup, sizeof(unsigned int));
time("\nECS advance_by_random_distance", "\t", [&](){
run_advance_by_random_distance(slabECS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x)", "\t", [&](){
hipLaunchKernelGGL(( run_advance_by_random_distance_and_kill), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), nRuns);
});
}
time(" GPU (run " + std::to_string(loadMultiplier) + "x, create holes)", "\n", [&](){
hipLaunchKernelGGL(( run_advance_by_random_distance_and_kill), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), loadMultiplier, survivalProbability);
});
time("AoS advance_by_random_distance", "\t", [&](){
run_advance_by_random_distance(slabAoS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x)", "\t", [&](){
hipLaunchKernelGGL(( run_advance_by_random_distance_and_kill), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), nRuns);
});
}
time(" GPU (run " + std::to_string(loadMultiplier) + "x, create holes)", "\n", [&](){
hipLaunchKernelGGL(( run_advance_by_random_distance_and_kill), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), loadMultiplier, survivalProbability);
});
checkCuda( hipMemcpy(GPUMemECS.get(), GPUMemECS2.get(), sizeof(SlabSoA), hipMemcpyDefault) );
time(" GPU (run separate launches " + std::to_string(loadMultiplier) + "x, create holes, compact)", "\n", [&](){
for (unsigned int run = 0; run < loadMultiplier; ++run) {
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx) {
advance_by_random_distance(track);
kill_random_particles(track, survivalProbability);
});
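      // Compact every fifth pass; with MEASURE_COMPACTING_TIME enabled, occupancy is checked before and
      // after compaction to verify that no live tracks are lost.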
if (run % 5 == 0) {
if constexpr (MEASURE_COMPACTING_TIME) {
hipDeviceSynchronize();
hipLaunchKernelGGL(( checkGlobalOccupancy), dim3(1), dim3(1), 0, 0, GPUMemECS.get(), occup);
hipDeviceSynchronize();
time("ECS Compactification run", "", [&](){
hipLaunchKernelGGL(( run_merge_blocks), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get());
});
hipLaunchKernelGGL(( checkGlobalOccupancy), dim3(1), dim3(1), 0, 0, GPUMemECS.get(), occup, occup);
hipDeviceSynchronize();
std::cout << " Occupancy= " << *occup << std::endl;
} else {
hipLaunchKernelGGL(( run_merge_blocks), dim3(nBlock), dim3(nThread), 0, 0, GPUMemECS.get());
}
}
}
});
checkCuda( hipMemcpy(GPUMemAoS.get(), GPUMemAoS2.get(), sizeof(SlabAoS), hipMemcpyDefault) );
time(" GPU (run separate launches " + std::to_string(loadMultiplier) + "x, create holes, compact)", "\n", [&](){
for (unsigned int run = 0; run < loadMultiplier; ++run) {
hipLaunchKernelGGL(( launcher), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx) {
advance_by_random_distance(track);
kill_random_particles(track, survivalProbability);
});
if (run % 5 == 0) {
if constexpr (MEASURE_COMPACTING_TIME) {
hipDeviceSynchronize();
hipLaunchKernelGGL(( checkGlobalOccupancy), dim3(1), dim3(1), 0, 0, GPUMemAoS.get(), occup);
hipDeviceSynchronize();
time("AoS Compactification run", "", [&](){
hipLaunchKernelGGL(( run_merge_blocks), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get());
});
hipLaunchKernelGGL(( checkGlobalOccupancy), dim3(1), dim3(1), 0, 0, GPUMemAoS.get(), occup, occup);
hipDeviceSynchronize();
std::cout << " Occupancy= " << *occup << std::endl;
} else {
hipLaunchKernelGGL(( run_merge_blocks), dim3(nBlock), dim3(nThread), 0, 0, GPUMemAoS.get());
}
}
}
});
time("Memcpy back", "\n", [&](){
checkCuda( hipMemcpy(slabECS.get(), GPUMemECS.get(), sizeof(SlabSoA), hipMemcpyDefault) );
checkCuda( hipMemcpy(slabAoS.get(), GPUMemAoS.get(), sizeof(SlabAoS), hipMemcpyDefault) );
});
std::cout << "Particles from ECS, GPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, GPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
}
| 5e799b9bd32cb4c748c9219ed1a32bfd023aff2c.cu | // SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "ecs.h"
#include "Kernels.h"
#include "mergeAndSortBlocks.cuh"
#include "CudaHelpers.h"
#include <algorithm>
#include <cstdio>
#include <cstddef>
#include <cstdint>
#include <typeinfo>
#include <chrono>
#include <memory>
#include <iostream>
#include <iomanip>
/// Generic launcher for use with e.g. lambdas.
/// For every track it invokes the functor with:
/// - a reference to the track
/// - the task index
/// - the block index
/// - the track index within the block
/// - the global track index within the task
template<typename Slab, typename Func_t>
__global__ void launcher(Slab* slab, Func_t func, unsigned int loadFactor = 1)
{
for (unsigned int extraRuns = 0; extraRuns < loadFactor; ++extraRuns) {
for (unsigned int taskIdx = 0; taskIdx < slab->tasks_per_slab; ++taskIdx) {
for (unsigned int globalTrackIdx_inTask = blockIdx.x * blockDim.x + threadIdx.x;
globalTrackIdx_inTask < Slab::tracks_per_block * Slab::blocks_per_task;
globalTrackIdx_inTask += gridDim.x * blockDim.x) {
const auto tIdx = globalTrackIdx_inTask % Slab::tracks_per_block;
const auto bIdx = globalTrackIdx_inTask / Slab::tracks_per_block;
assert(tIdx < Slab::tracks_per_block);
assert(bIdx < Slab::blocks_per_task);
func(slab->tracks[taskIdx][bIdx][tIdx], taskIdx, bIdx, tIdx, globalTrackIdx_inTask);
}
}
}
}
/// A timing helper that runs the given callable, synchronises the device, and prints the elapsed time.
void time(std::string what, std::string after, std::function<void()> func) {
std::cout << std::setw(30) << std::left << what << std::flush;
const auto start = std::chrono::high_resolution_clock::now();
func();
checkCuda( cudaDeviceSynchronize() );
const auto stop = std::chrono::high_resolution_clock::now();
std::cout << "\t" << std::right << std::fixed << std::setprecision(3) << std::setw(12)
<< std::chrono::duration<double, std::milli>(stop - start).count() << " ms."
<< after << std::flush;
}
template<typename Slab>
__global__
void checkGlobalOccupancy(Slab* slab, unsigned int* storeOccupancy, unsigned int* referenceOccupancy = nullptr) {
unsigned int occup = 0;
for (auto& task : slab->tracks) {
for (auto& block : task) {
occup += occupancy<Slab::tracks_per_block, 1>(&block);
}
}
if (referenceOccupancy) {
assert(*referenceOccupancy == occup);
if (*referenceOccupancy != occup) {
printf("Error: Occupancy is wrong: last=%d now=%d\n", *referenceOccupancy, occup);
}
}
*storeOccupancy = occup;
}
int main() {
constexpr unsigned int loadMultiplier = 100;
#define MEASURE_COMPACTING_TIME false
#ifdef NDEBUG
constexpr unsigned int nBlock = SlabSoA::blocks_per_task;
constexpr unsigned int nThread = SlabSoA::tracks_per_block;
#else
constexpr unsigned int nBlock = 2;
constexpr unsigned int nThread = 64;
#endif
// First initialisation, so it doesn't affect timings
cudaDeviceSynchronize();
std::unique_ptr<SlabSoA> slabECS;
std::unique_ptr<SlabAoS> slabAoS;
time("ECS construct slabs", "\n", [&](){
slabECS = std::make_unique<SlabSoA>();
});
time("AoS construct slabs", "\n", [&](){
slabAoS = std::make_unique<SlabAoS>();
});
std::unique_ptr<SlabSoA, CudaDeleter> GPUMemECS;
std::unique_ptr<SlabSoA, CudaDeleter> GPUMemECS2;
std::unique_ptr<SlabAoS, CudaDeleter> GPUMemAoS;
std::unique_ptr<SlabAoS, CudaDeleter> GPUMemAoS2;
time("cuda malloc+memcpy", "\n", [&](){
GPUMemECS = make_unique_cuda<SlabSoA>();
GPUMemECS2 = make_unique_cuda<SlabSoA>();
checkCuda( cudaMemcpy(GPUMemECS.get(), slabECS.get(), sizeof(SlabSoA), cudaMemcpyDefault) );
GPUMemAoS = make_unique_cuda<SlabAoS>();
GPUMemAoS2 = make_unique_cuda<SlabAoS>();
checkCuda( cudaMemcpy(GPUMemAoS.get(), slabAoS.get(), sizeof(SlabAoS), cudaMemcpyDefault) );
});
// Give all particles an id
// ------------------------------------
time("ECS enumerate_particles", "\t", [&](){
host_enumerate_particles(slabECS.get());
});
time(" GPU", "\t", [&](){
run_enumerate_particles<<<nBlock, nThread>>>(GPUMemECS.get(), 1);
});
checkEnumeration<<<1, 1>>>(GPUMemECS.get());
checkCuda( cudaDeviceSynchronize() );
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (lambda, run " + std::to_string(nRuns) + "x)", "\t", [&](){
launcher<<<nBlock, nThread>>>(GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
const unsigned int particleID = taskIdx * SlabSoA::blocks_per_task * SlabSoA::tracks_per_block
+ globalTIdx;
enumerate_particles(track, particleID);
},
nRuns);
});
}
std::cout << std::endl;
checkEnumeration<<<1, 1>>>(GPUMemECS.get());
checkCuda( cudaDeviceSynchronize() );
time("AoS enumerate_particles", "\t", [&](){
host_enumerate_particles(slabAoS.get());
});
time(" GPU", "\t", [&](){
run_enumerate_particles<<<nBlock, nThread>>>(GPUMemAoS.get());
});
checkEnumeration<<<1, 1>>>(GPUMemAoS.get());
checkCuda( cudaDeviceSynchronize() );
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (lambda, run " + std::to_string(nRuns) + "x)", "\t", [&](){
launcher<<<nBlock, nThread>>>(GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
const unsigned int particleID = taskIdx * SlabSoA::blocks_per_task * SlabSoA::tracks_per_block
+ globalTIdx;
enumerate_particles(track, particleID);
},
nRuns);
});
}
std::cout << std::endl;
checkEnumeration<<<1, 1>>>(GPUMemAoS.get());
checkCuda( cudaDeviceSynchronize() );
// Seed all rngs on GPU
// --------------------
time("ECS seed rng", "\n", [&](){
launcher<<<nBlock, nThread>>>(GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
seed_rng(track);
});
});
time("AoS seed rng", "\n", [&](){
launcher<<<nBlock, nThread>>>(GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
seed_rng(track);
});
});
// Initialise position, momentum and energy
// ----------------------------------------
time("ECS compute_energy", "\t", [&](){
host_compute_energy(slabECS.get());
});
time(" GPU (run 1x, invoke as kernel)", "\t", [&](){
run_compute_energy<<<nBlock, nThread>>>(GPUMemECS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x, invoke as lambda)", "\t", [&](){
launcher<<<nBlock, nThread>>>(GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
init_pos_mom(track);
compute_energy(track);
},
nRuns);
});
}
std::cout << std::endl;
time("AoS compute_energy", "\t", [&](){
host_compute_energy(slabAoS.get());
});
time(" GPU (run 1x, invoke as kernel)", "\t", [&](){
run_compute_energy<<<nBlock, nThread>>>(GPUMemAoS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x, invoke as lambda)", "\t", [&](){
launcher<<<nBlock, nThread>>>(GPUMemAoS.get(), [=]__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx){
init_pos_mom(track);
compute_energy(track);
},
nRuns);
});
}
std::cout << std::endl;
// Print some particles
// ------------------------
std::cout << "Particles from ECS, CPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, CPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
time("Memcpy back", "\n", [&](){
checkCuda( cudaMemcpy(slabECS.get(), GPUMemECS.get(), sizeof(SlabSoA), cudaMemcpyDefault) );
checkCuda( cudaMemcpy(slabAoS.get(), GPUMemAoS.get(), sizeof(SlabAoS), cudaMemcpyDefault) );
});
std::cout << "Particles from ECS, GPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, GPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
// Save current state, so we can run compactification multiple times
time("Memcpy GPU to GPU", "\n", [&](){
checkCuda( cudaMemcpy(GPUMemECS2.get(), GPUMemECS.get(), sizeof(SlabSoA), cudaMemcpyDefault) );
checkCuda( cudaMemcpy(GPUMemAoS2.get(), GPUMemAoS.get(), sizeof(SlabAoS), cudaMemcpyDefault) );
});
// Advance particle by momentum vector with random magnitude
// ---------------------------------------------------------
// As a second step, advance the particles and kill them randomly
constexpr float survivalProbability = 0.97f;
unsigned int* occup;
cudaMallocManaged(&occup, sizeof(unsigned int));
time("\nECS advance_by_random_distance", "\t", [&](){
run_advance_by_random_distance(slabECS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x)", "\t", [&](){
run_advance_by_random_distance_and_kill<<<nBlock, nThread>>>(GPUMemECS.get(), nRuns);
});
}
time(" GPU (run " + std::to_string(loadMultiplier) + "x, create holes)", "\n", [&](){
run_advance_by_random_distance_and_kill<<<nBlock, nThread>>>(GPUMemECS.get(), loadMultiplier, survivalProbability);
});
time("AoS advance_by_random_distance", "\t", [&](){
run_advance_by_random_distance(slabAoS.get());
});
for (auto nRuns : std::initializer_list<unsigned int>{1, loadMultiplier}) {
time(" GPU (run " + std::to_string(nRuns) + "x)", "\t", [&](){
run_advance_by_random_distance_and_kill<<<nBlock, nThread>>>(GPUMemAoS.get(), nRuns);
});
}
time(" GPU (run " + std::to_string(loadMultiplier) + "x, create holes)", "\n", [&](){
run_advance_by_random_distance_and_kill<<<nBlock, nThread>>>(GPUMemAoS.get(), loadMultiplier, survivalProbability);
});
checkCuda( cudaMemcpy(GPUMemECS.get(), GPUMemECS2.get(), sizeof(SlabSoA), cudaMemcpyDefault) );
time(" GPU (run separate launches " + std::to_string(loadMultiplier) + "x, create holes, compact)", "\n", [&](){
for (unsigned int run = 0; run < loadMultiplier; ++run) {
launcher<<<nBlock, nThread>>>(GPUMemECS.get(), []__device__(decltype(GPUMemECS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx) {
advance_by_random_distance(track);
kill_random_particles(track, survivalProbability);
});
if (run % 5 == 0) {
if constexpr (MEASURE_COMPACTING_TIME) {
cudaDeviceSynchronize();
checkGlobalOccupancy<<<1, 1>>>(GPUMemECS.get(), occup);
cudaDeviceSynchronize();
time("ECS Compactification run", "", [&](){
run_merge_blocks<<<nBlock, nThread>>>(GPUMemECS.get());
});
checkGlobalOccupancy<<<1, 1>>>(GPUMemECS.get(), occup, occup);
cudaDeviceSynchronize();
std::cout << " Occupancy= " << *occup << std::endl;
} else {
run_merge_blocks<<<nBlock, nThread>>>(GPUMemECS.get());
}
}
}
});
checkCuda( cudaMemcpy(GPUMemAoS.get(), GPUMemAoS2.get(), sizeof(SlabAoS), cudaMemcpyDefault) );
time(" GPU (run separate launches " + std::to_string(loadMultiplier) + "x, create holes, compact)", "\n", [&](){
for (unsigned int run = 0; run < loadMultiplier; ++run) {
launcher<<<nBlock, nThread>>>(GPUMemAoS.get(), []__device__(decltype(GPUMemAoS->tracks[0][0][0]) track,
unsigned int taskIdx, unsigned int bIdx, unsigned int tIdx, unsigned int globalTIdx) {
advance_by_random_distance(track);
kill_random_particles(track, survivalProbability);
});
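// Same compaction cadence as in the ECS loop above.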
if (run % 5 == 0) {
if constexpr (MEASURE_COMPACTING_TIME) {
cudaDeviceSynchronize();
checkGlobalOccupancy<<<1, 1>>>(GPUMemAoS.get(), occup);
cudaDeviceSynchronize();
time("AoS Compactification run", "", [&](){
run_merge_blocks<<<nBlock, nThread>>>(GPUMemAoS.get());
});
checkGlobalOccupancy<<<1, 1>>>(GPUMemAoS.get(), occup, occup);
cudaDeviceSynchronize();
std::cout << " Occupancy= " << *occup << std::endl;
} else {
run_merge_blocks<<<nBlock, nThread>>>(GPUMemAoS.get());
}
}
}
});
time("Memcpy back", "\n", [&](){
checkCuda( cudaMemcpy(slabECS.get(), GPUMemECS.get(), sizeof(SlabSoA), cudaMemcpyDefault) );
checkCuda( cudaMemcpy(slabAoS.get(), GPUMemAoS.get(), sizeof(SlabAoS), cudaMemcpyDefault) );
});
std::cout << "Particles from ECS, GPU:\n";
slabECS->tracks[0][0].dump(0);
slabECS->tracks[0][0].dump(1);
slabECS->tracks[0][1].dump(2);
slabECS->tracks[1][1].dump(3);
std::cout << "Particles from AoS, GPU:\n";
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 0].dump(0);
slabAoS->tracks[0][0*SlabSoA::tracks_per_block + 1].dump(0);
slabAoS->tracks[0][1*SlabSoA::tracks_per_block + 2].dump(0);
slabAoS->tracks[1][1*SlabSoA::tracks_per_block + 3].dump(0);
}
|
c0e03a24df3869c11f4c16a502e4358be405c832.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void kernel_sum(const float* A, const float* B, float* C, int n_el);
void sum(const float* A, const float* B, float* C, int n_el) {
int threadsPerBlock,blocksPerGrid;
if (n_el<512){
threadsPerBlock = n_el;
blocksPerGrid = 1;
} else {
threadsPerBlock = 512;
blocksPerGrid = ceil(double(n_el)/double(threadsPerBlock));
}
hipLaunchKernelGGL(( kernel_sum), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A, B, C, n_el);
}
__global__ void kernel_sum(const float* A, const float* B, float* C, int n_el)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < n_el) C[tid] = A[tid] + B[tid];
} | c0e03a24df3869c11f4c16a502e4358be405c832.cu | #include <math.h>
#include "cuda_runtime.h"
#include "kernel.h"
__global__ void kernel_sum(const float* A, const float* B, float* C, int n_el);
void sum(const float* A, const float* B, float* C, int n_el) {
int threadsPerBlock,blocksPerGrid;
if (n_el<512){
threadsPerBlock = n_el;
blocksPerGrid = 1;
} else {
threadsPerBlock = 512;
blocksPerGrid = ceil(double(n_el)/double(threadsPerBlock));
}
kernel_sum<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, n_el);
}
__global__ void kernel_sum(const float* A, const float* B, float* C, int n_el)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < n_el) C[tid] = A[tid] + B[tid];
} |
f1107ef87b90855189d9d954e879ff572625f56b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "counting_sort.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *array = NULL;
hipMalloc(&array, XSIZE*YSIZE);
int *temp = NULL;
hipMalloc(&temp, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((counting_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, array,temp,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((counting_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, array,temp,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((counting_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, array,temp,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f1107ef87b90855189d9d954e879ff572625f56b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "counting_sort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *array = NULL;
cudaMalloc(&array, XSIZE*YSIZE);
int *temp = NULL;
cudaMalloc(&temp, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
counting_sort<<<gridBlock,threadBlock>>>(array,temp,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
counting_sort<<<gridBlock,threadBlock>>>(array,temp,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
counting_sort<<<gridBlock,threadBlock>>>(array,temp,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e83795f580652bdb3f639881f6e88ca647242e34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// tdfc-cuda backend autocompiled body file
// tdfc version 1.160
// Wed May 25 15:55:04 2011
#include <stdio.h>
__global__ void tdfc_axpy(float cc_alpha,float* cc_x,float* cc_y_in,float* cc_y_out,int N )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx<N)
{
{
cc_y_out[idx] = (((cc_x[idx]*cc_alpha)+cc_y_in[idx]));
}
}
} //tdfc_axpy
| e83795f580652bdb3f639881f6e88ca647242e34.cu | // tdfc-cuda backend autocompiled body file
// tdfc version 1.160
// Wed May 25 15:55:04 2011
#include <stdio.h>
__global__ void tdfc_axpy(float cc_alpha,float* cc_x,float* cc_y_in,float* cc_y_out,int N )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx<N)
{
{
cc_y_out[idx] = (((cc_x[idx]*cc_alpha)+cc_y_in[idx]));
}
}
} //tdfc_axpy
|
33c585d93f66b3ad8bdb668c33bcaa0f9cca1019.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include <cfloat>
__global__ void cuda_VolumetricMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int maxColumn = 0;
int maxRow = 0;
int maxFrame = 0;
float max = -FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < kW; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
if (max < val)
{
max = val;
maxColumn = column;
maxRow = row;
maxFrame = frame;
}
}
}
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
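// The argmax location is packed byte-wise into one float of `indices`: e.g. maxFrame = 2,
// maxRow = 5, maxColumn = 7 become the four bytes {2, 5, 7, 0} of that element's storage,
// and cuda_VolumetricMaxPooling_updateGradInput unpacks them the same way.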
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = maxFrame;
((unsigned char*)(idx))[1] = maxRow;
((unsigned char*)(idx))[2] = maxColumn;
((unsigned char*)(idx))[3] = 0;
}
}
template <int KERNEL_WIDTH>
__global__ void cuda_VolumetricMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int maxColumn = 0;
int maxRow = 0;
int maxFrame = 0;
float max = -FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < KERNEL_WIDTH; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
if (max < val)
{
max = val;
maxColumn = column;
maxRow = row;
maxFrame = frame;
}
}
}
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = maxFrame;
((unsigned char*)(idx))[1] = maxRow;
((unsigned char*)(idx))[2] = maxColumn;
((unsigned char*)(idx))[3] = 0;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateOutput<KW>), dim3(grid), dim3(block), \
0, THCState_getCurrentStream(state), \
cudaInput, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, offsetZ); \
break
void THNN_CudaVolumetricMaxPooling_updateOutput(
THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceilMode)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
THCUNN_assertSameGPU(state, 3, input, indices, output);
if (THCudaTensor_nDimension(state, input) == 4)
{
THArgCheck(
THCudaTensor_size(state, input, 1) >= kT &&
THCudaTensor_size(state, input, 2) >= kH &&
THCudaTensor_size(state, input, 3) >= kW, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
}
else if (THCudaTensor_nDimension(state, input) == 5)
{
THArgCheck(
THCudaTensor_size(state, input, 4) >= kW &&
THCudaTensor_size(state, input, 3) >= kH &&
THCudaTensor_size(state, input, 2) >= kT, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected");
}
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size"
);
if (ceilMode)
{
outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
else
{
outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padW || padH)
{
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCudaTensor_resize4d(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
/* indices pack ti,i,j locations for each output point as uchar into
each float of the tensor */
THCudaTensor_resize4d(state, indices, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCudaTensor_resize5d(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
// Index tensor packs index offsets as uchars into floats
THCudaTensor_resize5d(state, indices, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCudaTensor_newContiguous(state, input);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaInput;
THCDeviceTensor<float, 4> cudaOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaInput = toDeviceTensor<float, 4>(state, input);
cudaOutput = toDeviceTensor<float, 4>(state, output);
}
else
{
cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
}
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices),
indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
switch (kW)
{
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateOutput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaInput, cudaIndices, cudaOutput,
kT, kH, kW, dT, dH, dW,
padT, padH, padW, offsetZ);
}
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
THCudaTensor_free(state, input);
THCudaTensor_free(state, indices1);
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
__global__ void cuda_VolumetricMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> gradInput,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // output slice/feature
if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3))
{
float *idx = &indices[slice][oFrame][oRow][oColumn];
int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT;
int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH;
int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW;
atomicAdd(&gradInput[slice][iFrame][iRow][iColumn],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
void THNN_CudaVolumetricMaxPooling_updateGradInput(
THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput,
THCudaTensor *indices,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int outputTime;
int outputHeight;
int outputWidth;
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
if (THCudaTensor_nDimension(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
}
else
{
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
}
else
{
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices), indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateGradInput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
// cleanup
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, indices1);
}
| 33c585d93f66b3ad8bdb668c33bcaa0f9cca1019.cu | #include "THCUNN.h"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include <cfloat>
__global__ void cuda_VolumetricMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int maxColumn = 0;
int maxRow = 0;
int maxFrame = 0;
float max = -FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < kW; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
if (max < val)
{
max = val;
maxColumn = column;
maxRow = row;
maxFrame = frame;
}
}
}
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = maxFrame;
((unsigned char*)(idx))[1] = maxRow;
((unsigned char*)(idx))[2] = maxColumn;
((unsigned char*)(idx))[3] = 0;
}
}
template <int KERNEL_WIDTH>
__global__ void cuda_VolumetricMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int maxColumn = 0;
int maxRow = 0;
int maxFrame = 0;
float max = -FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < KERNEL_WIDTH; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
if (max < val)
{
max = val;
maxColumn = column;
maxRow = row;
maxFrame = frame;
}
}
}
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = maxFrame;
((unsigned char*)(idx))[1] = maxRow;
((unsigned char*)(idx))[2] = maxColumn;
((unsigned char*)(idx))[3] = 0;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
cuda_VolumetricMaxPooling_updateOutput<KW><<<grid, block, \
0, THCState_getCurrentStream(state)>>>( \
cudaInput, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, offsetZ); \
break
void THNN_CudaVolumetricMaxPooling_updateOutput(
THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceilMode)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
THCUNN_assertSameGPU(state, 3, input, indices, output);
if (THCudaTensor_nDimension(state, input) == 4)
{
THArgCheck(
THCudaTensor_size(state, input, 1) >= kT &&
THCudaTensor_size(state, input, 2) >= kH &&
THCudaTensor_size(state, input, 3) >= kW, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
}
else if (THCudaTensor_nDimension(state, input) == 5)
{
THArgCheck(
THCudaTensor_size(state, input, 4) >= kW &&
THCudaTensor_size(state, input, 3) >= kH &&
THCudaTensor_size(state, input, 2) >= kT, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected");
}
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size"
);
if (ceilMode)
{
outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
else
{
outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padW || padH)
{
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCudaTensor_resize4d(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
/* indices pack ti,i,j locations for each output point as uchar into
each float of the tensor */
THCudaTensor_resize4d(state, indices, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCudaTensor_resize5d(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
// Index tensor packs index offsets as uchars into floats
THCudaTensor_resize5d(state, indices, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCudaTensor_newContiguous(state, input);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaInput;
THCDeviceTensor<float, 4> cudaOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaInput = toDeviceTensor<float, 4>(state, input);
cudaOutput = toDeviceTensor<float, 4>(state, output);
}
else
{
cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
}
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices),
indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
switch (kW)
{
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
cuda_VolumetricMaxPooling_updateOutput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaInput, cudaIndices, cudaOutput,
kT, kH, kW, dT, dH, dW,
padT, padH, padW, offsetZ);
}
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
THCudaTensor_free(state, input);
THCudaTensor_free(state, indices1);
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
__global__ void cuda_VolumetricMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> gradInput,
int dT, int dH, int dW,
int padT, int padH, int padW, int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // output slice/feature
if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3))
{
float *idx = &indices[slice][oFrame][oRow][oColumn];
int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT;
int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH;
int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW;
atomicAdd(&gradInput[slice][iFrame][iRow][iColumn],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
void THNN_CudaVolumetricMaxPooling_updateGradInput(
THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput,
THCudaTensor *indices,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int outputTime;
int outputHeight;
int outputWidth;
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
if (THCudaTensor_nDimension(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
}
else
{
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
}
else
{
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices), indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
cuda_VolumetricMaxPooling_updateGradInput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
// cleanup
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, indices1);
}
|
0717e07d2bea8e046441c14874ab85891e477d3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//*LB*
// Copyright (c) 2009, Alexander Krizhevsky
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Toronto
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
/*
* conv_util.cu
*
* Created on: Nov 10, 2009
* Author: Alex Krizhevsky
*
* These are routines that are useful for convolutional neural nets/RBMs.
*/
/*#include <cutil_inline.h>*/
#include <assert.h>
#include "conv_util_hip.cuh"
#include "conv_common.cuh"
#include "cuv/tools/cuv_general.hpp"
/*
* Block size 16x16
* Don't need shared memory on devices with compute capability 1.3 because memory
* doesn't have to be accessed sequentially by threads.
*
 * This is far from perfect, and in many cases is actually slower than doing it on the
 * CPU, but it takes so little time that it doesn't matter.
*/
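/*
 * Worked example (derived from the index math in the kernel below, for illustration only):
 * with filterSize = 3, source element (y, x) is written to (2 - y, 2 - x), so
 * (0,0) -> (2,2), (0,2) -> (2,0) and (1,1) -> (1,1), i.e. a 180-degree rotation of the filter.
 */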
__global__ void kRotate180(float* filters, float* targets, const int filterSize) {
// __shared__ float shFilter[16][16];
const int filtIdx = blockIdx.x;
const int readStart = MUL24(MUL24(filterSize, filterSize), filtIdx);
filters += readStart;
targets += readStart;
for(int y = threadIdx.y; y < filterSize; y += 16) {
for(int x = threadIdx.x; x < filterSize; x += 16) {
const int writeX = filterSize - 1 - x;
const int writeY = filterSize - 1 - y;
targets[MUL24(writeY, filterSize) + writeX] = filters[MUL24(y, filterSize) + x];
}
}
}
/*
* Block size 16x16.
* Probably a better idea to allocate multiple blocks per image so you don't have
* to loop inside the block.
*/
__global__ void kCopyInto(float* images, float* targets, const int imgSize, const int paddingSize, const int numImages) {
const int imgIdx = blockIdx.y * gridDim.x + blockIdx.x;
if (imgIdx < numImages) {
const int targetSize = imgSize + 2 * paddingSize;
images += imgIdx * imgSize * imgSize;
targets += imgIdx * targetSize * targetSize + MUL24(paddingSize, targetSize) + paddingSize;
for (int y = threadIdx.y; y < imgSize; y += 16) {
for (int x = threadIdx.x; x < imgSize; x += 16) {
targets[MUL24(y, targetSize) + x] = images[MUL24(y, imgSize) + x];
}
}
}
}
void rotate180(NVMatrix* filters, NVMatrix* targets, bool color=false) {
assert(!color || filters->getNumCols() % 3 == 0);
assert(!color && floor(sqrt(float(filters->getNumCols()))) == sqrt(float(filters->getNumCols()))
|| color && floor(sqrt(float(filters->getNumCols() / 3))) == sqrt(float(filters->getNumCols() / 3)));
assert(targets->isSameDims(*filters));
int numFilters = (color ? 3 : 1) * filters->getNumRows();
int filterSize = color ? int(sqrt(filters->getNumCols() / 3)) : int(sqrt(filters->getNumCols()));
dim3 threads(16, 16, 1);
dim3 blocks(numFilters, 1, 1);
hipLaunchKernelGGL(( kRotate180), dim3(blocks), dim3(threads), 0, 0, filters->getDevData(), targets->getDevData(), filterSize);
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
}
/*
* This function copies the images in "images" into "targets" and adds a padding.
*
* Specifically, suppose "images" contains just one image and it looks like this:
* IIII
* IIII
* IIII
*
* And targets looks like this:
* XXXXXX
* XXXXXX
* XXXXXX
* XXXXXX
* XXXXXX
*
* After this function is called, targets will look like this:
* XXXXXX
* XIIIIX
* XIIIIX
* XIIIIX
* XXXXXX
*
* Where the Is and Xs are arbitrary values.
*
* You can use this function to pad a bunch of images with a border of zeros. To do this,
* the targets matrix should be all zeros.
*
*/
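/*
 * Usage sketch (illustrative only, not taken from the original sources): pad a batch of
 * 32x32 grayscale images with a 2-pixel zero border. The caller is assumed to have
 * allocated the target with numImages rows and (32 + 2*2) * (32 + 2*2) = 1296 columns and
 * to have zeroed it beforehand, since this routine only overwrites the interior region:
 *
 * NVMatrix images(numImages, 32 * 32); // source images, filled elsewhere
 * NVMatrix padded(numImages, 36 * 36); // pre-zeroed target
 * copyInto(&images, &padded, 2, false); // border pixels keep their zero values
 */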
void copyInto(NVMatrix* images, NVMatrix* targets, int paddingSize, bool color=false) {
assert(!color || images->getNumCols() % 3 == 0);
assert(!color && floor(sqrt(float(images->getNumCols()))) == sqrt(float(images->getNumCols()))
|| color && floor(sqrt(float(images->getNumCols() / 3))) == sqrt(float(images->getNumCols() / 3)));
int imgSize = color ? int(sqrt(images->getNumCols() / 3)) : int(sqrt(images->getNumCols()));
int numImages = (color ? 3 : 1) * images->getNumRows();
assert(targets->getNumElements() == numImages * (imgSize + 2*paddingSize)*(imgSize + 2*paddingSize));
dim3 threads(16, 16, 1);
dim3 blocks(numImages, 1, 1);
while(blocks.x > NUM_BLOCKS_MAX) {
blocks.x = DIVUP(blocks.x, 2);
blocks.y *= 2;
}
hipLaunchKernelGGL(( kCopyInto), dim3(blocks), dim3(threads), 0, 0, images->getDevData(), targets->getDevData(), imgSize, paddingSize, numImages);
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
}
/*
* f = factor, m = image size
* Converts a bunch of mxm images to (m/f)x(m/f) images by averaging non-overlapping fxf regions.
*
* The avoidBankConflicts option causes this function to use extra shared memory to avoid all
* bank conflicts. Most bank conflicts are avoided regardless of the setting of this parameter,
* and so setting this parameter to true will have minimal impact on performance (I noticed
 * a 5% improvement). (Still can get 2-way conflicts if factor doesn't divide 16.)
*/
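/*
 * Worked example (numbers follow directly from the description above): with m = 32 and
 * f = 4, each 32x32 image becomes an 8x8 image, and every output value is the average of
 * one non-overlapping 4x4 region, i.e. the sum of 16 input pixels divided by 16.
 */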
void subsample(NVMatrix* images, NVMatrix* targets, int factor, bool avoidBankConflicts) {
int imgPixels = images->getNumCols();
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(imgSize > factor);
assert(imgSize % factor == 0);
assert(factor <= 16);
assert(factor >= 2);
assert(imgSize <= 512);
// assert(factor % 2 == 0); // TODO: remove this restriction
int numRegions = images->getNumElements() / (factor*factor);
int numRegionsY = (imgSize / factor) * images->getNumRows();
assert(targets->getNumElements() == numRegions);
// assert(imgSize * (factor/2) <= 512); // for now
int regionsXPerBlock = imgSize / factor;
int numThreadsX = imgSize;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int regionsYPerBlock = MIN(512 / numThreadsX, SHMEM_MAX / (4*imgSize)); // to avoid running out of shmem
// regionsYPerBlock--;
int regionsPerBlock = regionsYPerBlock * regionsXPerBlock;
// this will avoid all bank conflicts but may (?) use up too much shmem
int shmemPadX = avoidBankConflicts * (DIVUP(16,factor) + (regionsPerBlock % 16 == 0 ? 0 : 16 - regionsPerBlock % 16));
// shmemPadX = 0;
int shmemY = factor, shmemX = regionsPerBlock + shmemPadX;
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
// this really shouldn't happen and i've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "subsample: not enough shared memory!");
exit(EXIT_FAILURE);
}
int numThreadsY = regionsYPerBlock;
// int blocks = numRegionsY / regionsYPerBlock;
int blocksX = imgSize / factor, blocksY = DIVUP(images->getNumRows(), regionsYPerBlock);
assert(blocksX < 65535 && blocksY < 65535);
// assert(numRegionsY % regionsYPerBlock == 0);
bool checkThreadBounds = numRegionsY % regionsYPerBlock != 0;
// printf("num regions y: %d, regions y per block: %d\n", numRegionsY, regionsYPerBlock);
dim3 grid(blocksX, blocksY);
dim3 threads(numThreadsX, numThreadsY);
// printf("grid: %ux%u, threads: %ux%u\n", grid.y, grid.x, threads.y, threads.x);
// printf("check bounds: %u\n", checkThreadBounds);
// printf("using %u bytes of shmem\n", shmem);
if (factor == 2) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<2, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<2, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 3) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<3, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<3, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 4) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<4, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<4, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 5) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<5, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<5, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 6) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<6, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<6, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 7) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<7, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<7, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 8) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<8, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<8, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 9) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<9, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<9, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 10) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<10, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<10, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 11) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<11, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<11, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 12) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<12, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<12, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 13) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<13, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<13, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 14) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<14, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<14, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 15) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<15, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<15, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 16) {
if (checkThreadBounds) {
hipLaunchKernelGGL(( kSubsample_noreduc<16, true>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
hipLaunchKernelGGL(( kSubsample_noreduc<16, false>), dim3(grid), dim3(threads),shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
// if(factor == 4) {
//// kSubsample_reduc<4><<<grid, threads,4*numThreadsX*numThreadsY>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY);
// }
}
/*
* This is kind of a mess...could use some cleanup.
* Blows up a bunch of mxm images to (mf)x(mf)
*/
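/*
 * Worked example (sizes follow from the asserts below): with m = 8 and f = 2, each 8x8
 * image becomes a 16x16 image, so the target matrix must hold factor*factor = 4 times as
 * many elements as the source.
 */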
void supersample(NVMatrix* images, NVMatrix* targets, int factor) {
bool trans = images->isTrans();
int imgPixels = images->getNumCols();
int numImages = images->getNumRows();
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(factor > 1 && factor <= 16);
assert(imgSize > 0 && imgSize <= 512);
int targetPixels = targets->getNumCols();
assert(sqrt(float(targetPixels)) == floor(sqrt(float(targetPixels))));
int targetSize = sqrt(targetPixels);
assert(targetSize % factor == 0);
assert(targetSize / factor == imgSize);
assert(targets->getNumElements() == images->getNumElements() * factor*factor);
int threadsX, threadsY;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int shmemX, shmemY, blocksX, blocksY;
bool useLoopy = false;
int THREADS_MAX_LOOPY = 512, THREADS_MAX = trans ? 256 : 512;
if (!trans) {
threadsX = imgSize;
threadsY = factor * MIN(THREADS_MAX / (factor*threadsX), SHMEM_MAX / (4*threadsX*factor)); // to avoid running out of shmem
if(threadsY == 0) {
assert(factor <= 32); // yes this is covered by assert above but in case i ever remove that
THREADS_MAX = 512;
useLoopy = true;
threadsX = MIN(16, imgSize); // not that imgsize can be < 16 here under current conditions
threadsY = factor * MIN(THREADS_MAX_LOOPY / (factor*threadsX), SHMEM_MAX / (4*threadsX*factor)); // to avoid running out of shmem
}
shmemY = threadsY;
shmemX = threadsX;
blocksX = imgSize;
blocksY = DIVUP(numImages, threadsY);
// printf("boundary problems: %u\n", numImages % threadsY != 0);
} else {
threadsY = imgSize;
threadsX = factor * MIN(THREADS_MAX / (factor*threadsY), SHMEM_MAX / (4*threadsY*factor)); // to avoid running out of shmem
if(threadsX < 8) {
useLoopy = true;
int xFactorMult = DIVUP(16, factor);
threadsX = xFactorMult * factor;
threadsY = THREADS_MAX / threadsX;
int newThreadsX = threadsX, newThreadsY = threadsY;
while (newThreadsY > 0 && imgSize % newThreadsY != 0) { // let's see if we can make threadsY divide imgSize
newThreadsX += factor;
newThreadsY = THREADS_MAX / newThreadsX;
}
if (newThreadsY > 0) {
threadsY = newThreadsY;
threadsX = newThreadsX;
}
assert(threadsY > 0);
}
shmemY = threadsX;
shmemX = threadsY + (1 - (threadsY % 2));
blocksX = DIVUP(numImages, threadsX);
blocksY = imgSize;
// printf("boundary problems: %u\n", numImages % threadsX != 0);
}
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
// this really shouldn't happen and i've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "supersample: not enough shared memory!");
exit(EXIT_FAILURE);
}
dim3 grid(blocksX, blocksY);
dim3 threads(threadsX, threadsY);
// printf("blocks: %dx%d, threads: %dx%d\n", blocksY, blocksX, threadsY, threadsX);
// printf("using %dx%d = %d bytes of shmem\n", shmemY, shmemX, shmem);
if(!trans) {
if(!useLoopy) {
if(factor == 2) {
hipLaunchKernelGGL(( kSupersampleMedium<2>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 3) {
hipLaunchKernelGGL(( kSupersampleMedium<3>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 4) {
hipLaunchKernelGGL(( kSupersampleMedium<4>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 5) {
hipLaunchKernelGGL(( kSupersampleMedium<5>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 6) {
hipLaunchKernelGGL(( kSupersampleMedium<6>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 7) {
hipLaunchKernelGGL(( kSupersampleMedium<7>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 8) {
hipLaunchKernelGGL(( kSupersampleMedium<8>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 9) {
hipLaunchKernelGGL(( kSupersampleMedium<9>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 10) {
hipLaunchKernelGGL(( kSupersampleMedium<10>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 11) {
hipLaunchKernelGGL(( kSupersampleMedium<11>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 12) {
hipLaunchKernelGGL(( kSupersampleMedium<12>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 13) {
hipLaunchKernelGGL(( kSupersampleMedium<13>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 14) {
hipLaunchKernelGGL(( kSupersampleMedium<14>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 15) {
hipLaunchKernelGGL(( kSupersampleMedium<15>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 16) {
hipLaunchKernelGGL(( kSupersampleMedium<16>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
}
} else {
if(factor == 2) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<2>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 3) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<3>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 4) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<4>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 5) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<5>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 6) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<6>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 7) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<7>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 8) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<8>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 9) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<9>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 10) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<10>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 11) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<11>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 12) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<12>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 13) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<13>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 14) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<14>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 15) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<15>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 16) {
hipLaunchKernelGGL(( kSupersampleMediumLoopy<16>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
}
}
} else {
if(!useLoopy) {
if(factor == 2) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<2>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 3) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<3>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 4) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<4>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 5) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<5>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 6) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<6>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 7) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<7>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 8) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<8>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 9) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<9>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 10) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<10>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 11) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<11>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 12) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<12>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 13) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<13>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 14) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<14>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 15) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<15>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 16) {
hipLaunchKernelGGL(( kSupersampleMediumTrans<16>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
}
} else {
if(factor == 2) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<2>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 3) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<3>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 4) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<4>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 5) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<5>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 6) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<6>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 7) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<7>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 8) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<8>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 9) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<9>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 10) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<10>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 11) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<11>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 12) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<12>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 13) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<13>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 14) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<14>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 15) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<15>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 16) {
hipLaunchKernelGGL(( kSupersampleMediumTransLoopy<16>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
}
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
}
void _gtm(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts, bool reverse) {
assert(!images->isTrans());
int imgPixels = reverse ? targets->getNumCols() : images->getNumCols();
int numImages = reverse ? targets->getNumRows() : images->getNumRows();
// printf("images: %dx%d\n", images->getNumRows(), images->getNumCols());
// printf("targets: %dx%d\n", targets->getNumRows(), targets->getNumCols());
// printf("imgPixels: %d\n", imgPixels);
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(squareSize > 1 && squareSize <= 16);
assert(imgSize > 0 && imgSize <= 512);
// assert(squareSize * imgSize <= 512);
assert(imgSize % squareSize == 0);
assert(imgSize > squareSize);
assert(targets->getNumElements() == images->getNumElements());
bool useLoopy = false;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int THREADS_MAX = 512;
int threadsX = imgSize;
int threadsY = squareSize * MIN(THREADS_MAX / (squareSize*threadsX), SHMEM_MAX / (4*threadsX*squareSize)); // to avoid running out of shmem
if (threadsY == 0) {
threadsX = 16;
threadsY = squareSize * MIN(THREADS_MAX / (squareSize*threadsX), SHMEM_MAX / (4*threadsX*squareSize));
useLoopy = true;
// printf("using loopy\n");
}
int shmemX = squareSize;
int shmemPadX = avoidBankConflicts * (1 - (shmemX % 2));
shmemX += shmemPadX;
int shmemY = threadsX * (threadsY / squareSize);
int loopsYPerBlock = useLoopy ? GTM_LOOPY_BLOCK_LOOPS_Y : GTM_BLOCK_LOOPS_Y;
int blocksX = imgSize;
int blocksY = DIVUP(numImages, loopsYPerBlock * threadsY);
// printf("boundary problems: %u\n", numImages % (loopsYPerBlock*threadsY) != 0);
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
        // this really shouldn't happen and I've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "_gtm: not enough shared memory!");
exit(EXIT_FAILURE);
}
dim3 grid(blocksX, blocksY);
dim3 threads(threadsX, threadsY);
// printf("blocks: %dx%d, threads: %dx%d\n", blocksY, blocksX, threadsY, threadsX);
// printf("using %dx%d = %d bytes of shmem\n", shmemY, shmemX, shmem);
if(reverse) {
if(!useLoopy) {
if(squareSize == 2) {
hipLaunchKernelGGL(( kGridToMatrix<2, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
hipLaunchKernelGGL(( kGridToMatrix<3, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
hipLaunchKernelGGL(( kGridToMatrix<4, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
hipLaunchKernelGGL(( kGridToMatrix<5, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
hipLaunchKernelGGL(( kGridToMatrix<6, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
hipLaunchKernelGGL(( kGridToMatrix<7, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
hipLaunchKernelGGL(( kGridToMatrix<8, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
hipLaunchKernelGGL(( kGridToMatrix<9, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
hipLaunchKernelGGL(( kGridToMatrix<10, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
hipLaunchKernelGGL(( kGridToMatrix<11, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
hipLaunchKernelGGL(( kGridToMatrix<12, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
hipLaunchKernelGGL(( kGridToMatrix<13, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
hipLaunchKernelGGL(( kGridToMatrix<14, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
hipLaunchKernelGGL(( kGridToMatrix<15, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
hipLaunchKernelGGL(( kGridToMatrix<16, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
} else {
if(squareSize == 2) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<2, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<3, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<4, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<5, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<6, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<7, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<8, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<9, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<10, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<11, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<12, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<13, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<14, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<15, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<16, true>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
}
} else {
if(!useLoopy) {
if(squareSize == 2) {
hipLaunchKernelGGL(( kGridToMatrix<2, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
hipLaunchKernelGGL(( kGridToMatrix<3, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
hipLaunchKernelGGL(( kGridToMatrix<4, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
hipLaunchKernelGGL(( kGridToMatrix<5, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
hipLaunchKernelGGL(( kGridToMatrix<6, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
hipLaunchKernelGGL(( kGridToMatrix<7, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
hipLaunchKernelGGL(( kGridToMatrix<8, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
hipLaunchKernelGGL(( kGridToMatrix<9, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
hipLaunchKernelGGL(( kGridToMatrix<10, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
hipLaunchKernelGGL(( kGridToMatrix<11, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
hipLaunchKernelGGL(( kGridToMatrix<12, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
hipLaunchKernelGGL(( kGridToMatrix<13, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
hipLaunchKernelGGL(( kGridToMatrix<14, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
hipLaunchKernelGGL(( kGridToMatrix<15, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
hipLaunchKernelGGL(( kGridToMatrix<16, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
} else {
if(squareSize == 2) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<2, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<3, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<4, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<5, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<6, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<7, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<8, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<9, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<10, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<11, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<12, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<13, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<14, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<15, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
hipLaunchKernelGGL(( kGridToMatrixLoopy<16, false>), dim3(grid), dim3(threads), shmem, 0, images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
}
void gridToMatrix(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts) {
_gtm(images, targets, squareSize, avoidBankConflicts, false);
}
void matrixToGrid(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts) {
_gtm(images, targets, squareSize, avoidBankConflicts, true);
}
/*
* Samples from a bunch of multinomial distributions, where each row of the "multi" matrix
* is a different distribution. Of course, each row of the "multi" matrix must sum to 1.
*
* It's optimized for the case when you want to sample from lots (hundreds of thousands)
* of fairly small multinomial distributions.
*
* The case when the multinomials are in columns is much easier and faster.
*/
void sampleMultinomial(NVMatrix* multi, NVMatrix* randoms, NVMatrix* targets) {
assert(!multi->isTrans());
assert(multi->isSameDims(*targets));
assert(multi->getNumCols() <= 1024);
assert(randoms->getNumElements() == multi->getNumRows());
int nomials = multi->getNumCols();
int multinomials = multi->getNumRows();
if(nomials > 256 || multinomials < 8192) {
/*
* I'm really not sure about the merits of this tree-based function. I may
* remove it in the future. It's faster in some cases (e.g. when the number of
* multinomials is small and the multinomials are very large), but you can get
* similar performance from the non-tree-based one by reducing the number of
* y-loops.
*/
dim3 grid(1, DIVUP(multinomials, 1));
while (grid.y > NUM_BLOCKS_MAX) {
grid.y = DIVUP(grid.y, 2);
grid.x *= 2;
}
// printf("grid: %dx%d\n", grid.x, grid.y);
        if(nomials <= 64) { // yes, I know this can't happen under current conditions
dim3 threads(32, 1);
hipLaunchKernelGGL(( kSampleMultinomial<32>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 128) {
dim3 threads(64, 1);
hipLaunchKernelGGL(( kSampleMultinomial<64>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 256) {
dim3 threads(128, 1);
hipLaunchKernelGGL(( kSampleMultinomial<128>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 512) {
dim3 threads(256, 1);
hipLaunchKernelGGL(( kSampleMultinomial<256>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else {
dim3 threads(512, 1);
hipLaunchKernelGGL(( kSampleMultinomial<512>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
}
} else {
dim3 grid(1,DIVUP(multinomials, SSM_THREADS_Y*SSM_LOOPS_Y));
dim3 threads(SSM_THREADS_X, SSM_THREADS_Y);
while (grid.y > NUM_BLOCKS_MAX) {
grid.y = DIVUP(grid.y, 2);
grid.x *= 2;
}
if(nomials <= 16) {
if(nomials <= 4) {
hipLaunchKernelGGL(( kSampleSmallMultinomial<1, 4>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 8) {
hipLaunchKernelGGL(( kSampleSmallMultinomial<1, 8>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 12) {
hipLaunchKernelGGL(( kSampleSmallMultinomial<1, 12>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else {
hipLaunchKernelGGL(( kSampleSmallMultinomial<1, 16>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
}
} else if(nomials <= 32) {
hipLaunchKernelGGL(( kSampleSmallMultinomial<2, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 48){
hipLaunchKernelGGL(( kSampleSmallMultinomial<3, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 64){
hipLaunchKernelGGL(( kSampleSmallMultinomial<4, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 80){
hipLaunchKernelGGL(( kSampleSmallMultinomial<5, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 96){
hipLaunchKernelGGL(( kSampleSmallMultinomial<6, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 112){
hipLaunchKernelGGL(( kSampleSmallMultinomial<7, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 128){
hipLaunchKernelGGL(( kSampleSmallMultinomial<8, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 144){
hipLaunchKernelGGL(( kSampleSmallMultinomial<9, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 160){
hipLaunchKernelGGL(( kSampleSmallMultinomial<10, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 176){
hipLaunchKernelGGL(( kSampleSmallMultinomial<11, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 192){
hipLaunchKernelGGL(( kSampleSmallMultinomial<12, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 208){
hipLaunchKernelGGL(( kSampleSmallMultinomial<13, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 224){
hipLaunchKernelGGL(( kSampleSmallMultinomial<14, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 240){
hipLaunchKernelGGL(( kSampleSmallMultinomial<15, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 256){
hipLaunchKernelGGL(( kSampleSmallMultinomial<16, SSM_THREADS_X>), dim3(grid), dim3(threads), 0, 0, multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(hipDeviceSynchronize());
}
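// Illustrative usage sketch (not part of the original source; guarded out of
// the build). The function and parameter names below are hypothetical. It
// assumes 'multi' is N x K with each row a distribution summing to 1,
// 'randoms' holds one random number per row (the function asserts
// randoms->getNumElements() == multi->getNumRows()), and 'targets' has the
// same shape as 'multi' and receives the sampled outcome for each row. All
// matrices are allocated and filled elsewhere.
#if 0
void exampleSampleMultinomial(NVMatrix* multi, NVMatrix* randoms, NVMatrix* targets) {
    // A single call samples from every row-wise distribution at once; the
    // kernel variant is chosen internally from the number of columns.
    sampleMultinomial(multi, randoms, targets);
}
#endif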
| 0717e07d2bea8e046441c14874ab85891e477d3b.cu | //*LB*
// Copyright (c) 2009, Alexander Krizhevsky
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Toronto
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
/*
* conv_util.cu
*
* Created on: Nov 10, 2009
* Author: Alex Krizhevsky
*
* These are routines that are useful for convolutional neural nets/RBMs.
*/
/*#include <cutil_inline.h>*/
#include <assert.h>
#include "conv_util.cuh"
#include "conv_common.cuh"
#include "cuv/tools/cuv_general.hpp"
/*
* Block size 16x16
* Don't need shared memory on devices with compute capability 1.3 because memory
* doesn't have to be accessed sequentially by threads.
*
 * This is far from perfect, and in many cases is actually slower than doing it on the
* CPU but still this takes so little time that it doesn't matter.
*/
__global__ void kRotate180(float* filters, float* targets, const int filterSize) {
// __shared__ float shFilter[16][16];
const int filtIdx = blockIdx.x;
const int readStart = MUL24(MUL24(filterSize, filterSize), filtIdx);
filters += readStart;
targets += readStart;
for(int y = threadIdx.y; y < filterSize; y += 16) {
for(int x = threadIdx.x; x < filterSize; x += 16) {
const int writeX = filterSize - 1 - x;
const int writeY = filterSize - 1 - y;
targets[MUL24(writeY, filterSize) + writeX] = filters[MUL24(y, filterSize) + x];
}
}
}
/*
* Block size 16x16.
* Probably a better idea to allocate multiple blocks per image so you don't have
* to loop inside the block.
*/
__global__ void kCopyInto(float* images, float* targets, const int imgSize, const int paddingSize, const int numImages) {
const int imgIdx = blockIdx.y * gridDim.x + blockIdx.x;
if (imgIdx < numImages) {
const int targetSize = imgSize + 2 * paddingSize;
images += imgIdx * imgSize * imgSize;
targets += imgIdx * targetSize * targetSize + MUL24(paddingSize, targetSize) + paddingSize;
for (int y = threadIdx.y; y < imgSize; y += 16) {
for (int x = threadIdx.x; x < imgSize; x += 16) {
targets[MUL24(y, targetSize) + x] = images[MUL24(y, imgSize) + x];
}
}
}
}
void rotate180(NVMatrix* filters, NVMatrix* targets, bool color=false) {
assert(!color || filters->getNumCols() % 3 == 0);
assert(!color && floor(sqrt(float(filters->getNumCols()))) == sqrt(float(filters->getNumCols()))
|| color && floor(sqrt(float(filters->getNumCols() / 3))) == sqrt(float(filters->getNumCols() / 3)));
assert(targets->isSameDims(*filters));
int numFilters = (color ? 3 : 1) * filters->getNumRows();
int filterSize = color ? int(sqrt(filters->getNumCols() / 3)) : int(sqrt(filters->getNumCols()));
dim3 threads(16, 16, 1);
dim3 blocks(numFilters, 1, 1);
kRotate180<<<blocks, threads>>>(filters->getDevData(), targets->getDevData(), filterSize);
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaThreadSynchronize());
}
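// Illustrative usage sketch (not part of the original source; guarded out of
// the build). The function name is hypothetical. It assumes 'filters' holds
// one square filter per row and 'targets' has identical dimensions; both are
// allocated elsewhere.
#if 0
void exampleRotate180(NVMatrix* filters, NVMatrix* targets) {
    // Flip every filter about both axes (a 180-degree rotation), one filter
    // per row, writing the result into 'targets'.
    rotate180(filters, targets, false /* color */);
}
#endif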
/*
* This function copies the images in "images" into "targets" and adds a padding.
*
* Specifically, suppose "images" contains just one image and it looks like this:
* IIII
* IIII
* IIII
*
* And targets looks like this:
* XXXXXX
* XXXXXX
* XXXXXX
* XXXXXX
* XXXXXX
*
* After this function is called, targets will look like this:
* XXXXXX
* XIIIIX
* XIIIIX
* XIIIIX
* XXXXXX
*
* Where the Is and Xs are arbitrary values.
*
* You can use this function to pad a bunch of images with a border of zeros. To do this,
* the targets matrix should be all zeros.
*
*/
void copyInto(NVMatrix* images, NVMatrix* targets, int paddingSize, bool color=false) {
assert(!color || images->getNumCols() % 3 == 0);
assert(!color && floor(sqrt(float(images->getNumCols()))) == sqrt(float(images->getNumCols()))
|| color && floor(sqrt(float(images->getNumCols() / 3))) == sqrt(float(images->getNumCols() / 3)));
int imgSize = color ? int(sqrt(images->getNumCols() / 3)) : int(sqrt(images->getNumCols()));
int numImages = (color ? 3 : 1) * images->getNumRows();
assert(targets->getNumElements() == numImages * (imgSize + 2*paddingSize)*(imgSize + 2*paddingSize));
dim3 threads(16, 16, 1);
dim3 blocks(numImages, 1, 1);
while(blocks.x > NUM_BLOCKS_MAX) {
blocks.x = DIVUP(blocks.x, 2);
blocks.y *= 2;
}
kCopyInto<<<blocks, threads>>>(images->getDevData(), targets->getDevData(), imgSize, paddingSize, numImages);
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaThreadSynchronize());
}
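// Illustrative usage sketch (not part of the original source; guarded out of
// the build). The function name and the choice of paddingSize = 2 are
// hypothetical. It assumes 'images' holds one imgSize x imgSize image per row
// and 'padded' is already allocated with (imgSize + 2*paddingSize)^2 columns
// per row and zeroed elsewhere, so the untouched border becomes a border of
// zeros as described above.
#if 0
void examplePadWithZeros(NVMatrix* images, NVMatrix* padded) {
    // Copy each image into the center of its target, leaving a 2-pixel
    // border untouched on every side.
    copyInto(images, padded, 2 /* paddingSize */, false /* color */);
}
#endif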
/*
* f = factor, m = image size
* Converts a bunch of mxm images to (m/f)x(m/f) images by averaging non-overlapping fxf regions.
*
* The avoidBankConflicts option causes this function to use extra shared memory to avoid all
* bank conflicts. Most bank conflicts are avoided regardless of the setting of this parameter,
* and so setting this parameter to true will have minimal impact on performance (I noticed
 * a 5% improvement). (still can get 2-way conflicts if factor doesn't divide 16)
*/
void subsample(NVMatrix* images, NVMatrix* targets, int factor, bool avoidBankConflicts) {
int imgPixels = images->getNumCols();
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(imgSize > factor);
assert(imgSize % factor == 0);
assert(factor <= 16);
assert(factor >= 2);
assert(imgSize <= 512);
// assert(factor % 2 == 0); // TODO: remove this restriction
int numRegions = images->getNumElements() / (factor*factor);
int numRegionsY = (imgSize / factor) * images->getNumRows();
assert(targets->getNumElements() == numRegions);
// assert(imgSize * (factor/2) <= 512); // for now
int regionsXPerBlock = imgSize / factor;
int numThreadsX = imgSize;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int regionsYPerBlock = MIN(512 / numThreadsX, SHMEM_MAX / (4*imgSize)); // to avoid running out of shmem
// regionsYPerBlock--;
int regionsPerBlock = regionsYPerBlock * regionsXPerBlock;
// this will avoid all bank conflicts but may (?) use up too much shmem
int shmemPadX = avoidBankConflicts * (DIVUP(16,factor) + (regionsPerBlock % 16 == 0 ? 0 : 16 - regionsPerBlock % 16));
// shmemPadX = 0;
int shmemY = factor, shmemX = regionsPerBlock + shmemPadX;
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
        // this really shouldn't happen and I've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "subsample: not enough shared memory!");
exit(EXIT_FAILURE);
}
int numThreadsY = regionsYPerBlock;
// int blocks = numRegionsY / regionsYPerBlock;
int blocksX = imgSize / factor, blocksY = DIVUP(images->getNumRows(), regionsYPerBlock);
assert(blocksX < 65535 && blocksY < 65535);
// assert(numRegionsY % regionsYPerBlock == 0);
bool checkThreadBounds = numRegionsY % regionsYPerBlock != 0;
// printf("num regions y: %d, regions y per block: %d\n", numRegionsY, regionsYPerBlock);
dim3 grid(blocksX, blocksY);
dim3 threads(numThreadsX, numThreadsY);
// printf("grid: %ux%u, threads: %ux%u\n", grid.y, grid.x, threads.y, threads.x);
// printf("check bounds: %u\n", checkThreadBounds);
// printf("using %u bytes of shmem\n", shmem);
if (factor == 2) {
if (checkThreadBounds) {
kSubsample_noreduc<2, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<2, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 3) {
if (checkThreadBounds) {
kSubsample_noreduc<3, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<3, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 4) {
if (checkThreadBounds) {
kSubsample_noreduc<4, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<4, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 5) {
if (checkThreadBounds) {
kSubsample_noreduc<5, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<5, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 6) {
if (checkThreadBounds) {
kSubsample_noreduc<6, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<6, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 7) {
if (checkThreadBounds) {
kSubsample_noreduc<7, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<7, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 8) {
if (checkThreadBounds) {
kSubsample_noreduc<8, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<8, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 9) {
if (checkThreadBounds) {
kSubsample_noreduc<9, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<9, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 10) {
if (checkThreadBounds) {
kSubsample_noreduc<10, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<10, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 11) {
if (checkThreadBounds) {
kSubsample_noreduc<11, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<11, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 12) {
if (checkThreadBounds) {
kSubsample_noreduc<12, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<12, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 13) {
if (checkThreadBounds) {
kSubsample_noreduc<13, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<13, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 14) {
if (checkThreadBounds) {
kSubsample_noreduc<14, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<14, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 15) {
if (checkThreadBounds) {
kSubsample_noreduc<15, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<15, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
} else if (factor == 16) {
if (checkThreadBounds) {
kSubsample_noreduc<16, true><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
} else {
kSubsample_noreduc<16, false><<<grid, threads,shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY, shmemX);
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaThreadSynchronize());
// if(factor == 4) {
//// kSubsample_reduc<4><<<grid, threads,4*numThreadsX*numThreadsY>>>(images->getDevData(), targets->getDevData(), imgSize, numRegionsY);
// }
}
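// Illustrative usage sketch (not part of the original source; guarded out of
// the build). The function name and the 64x64 / factor-4 sizes are
// hypothetical. It assumes 'images' holds N row-major 64x64 images (one per
// row, 4096 columns) and 'targets' is pre-allocated as N x 256; both
// allocations happen elsewhere.
#if 0
void exampleSubsampleBy4(NVMatrix* images, NVMatrix* targets) {
    // Average every non-overlapping 4x4 region of each image, producing a
    // 16x16 output per image.
    subsample(images, targets, 4 /* factor */, true /* avoidBankConflicts */);
}
#endif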
/*
* This is kind of a mess...could use some cleanup.
* Blows up a bunch of mxm images to (mf)x(mf)
*/
void supersample(NVMatrix* images, NVMatrix* targets, int factor) {
bool trans = images->isTrans();
int imgPixels = images->getNumCols();
int numImages = images->getNumRows();
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(factor > 1 && factor <= 16);
assert(imgSize > 0 && imgSize <= 512);
int targetPixels = targets->getNumCols();
assert(sqrt(float(targetPixels)) == floor(sqrt(float(targetPixels))));
int targetSize = sqrt(targetPixels);
assert(targetSize % factor == 0);
assert(targetSize / factor == imgSize);
assert(targets->getNumElements() == images->getNumElements() * factor*factor);
int threadsX, threadsY;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int shmemX, shmemY, blocksX, blocksY;
bool useLoopy = false;
int THREADS_MAX_LOOPY = 512, THREADS_MAX = trans ? 256 : 512;
if (!trans) {
threadsX = imgSize;
threadsY = factor * MIN(THREADS_MAX / (factor*threadsX), SHMEM_MAX / (4*threadsX*factor)); // to avoid running out of shmem
if(threadsY == 0) {
            assert(factor <= 32); // yes, this is covered by the assert above, but in case I ever remove that
THREADS_MAX = 512;
useLoopy = true;
threadsX = MIN(16, imgSize); // not that imgsize can be < 16 here under current conditions
threadsY = factor * MIN(THREADS_MAX_LOOPY / (factor*threadsX), SHMEM_MAX / (4*threadsX*factor)); // to avoid running out of shmem
}
shmemY = threadsY;
shmemX = threadsX;
blocksX = imgSize;
blocksY = DIVUP(numImages, threadsY);
// printf("boundary problems: %u\n", numImages % threadsY != 0);
} else {
threadsY = imgSize;
threadsX = factor * MIN(THREADS_MAX / (factor*threadsY), SHMEM_MAX / (4*threadsY*factor)); // to avoid running out of shmem
if(threadsX < 8) {
useLoopy = true;
int xFactorMult = DIVUP(16, factor);
threadsX = xFactorMult * factor;
threadsY = THREADS_MAX / threadsX;
int newThreadsX = threadsX, newThreadsY = threadsY;
while (newThreadsY > 0 && imgSize % newThreadsY != 0) { // let's see if we can make threadsY divide imgSize
newThreadsX += factor;
newThreadsY = THREADS_MAX / newThreadsX;
}
if (newThreadsY > 0) {
threadsY = newThreadsY;
threadsX = newThreadsX;
}
assert(threadsY > 0);
}
shmemY = threadsX;
shmemX = threadsY + (1 - (threadsY % 2));
blocksX = DIVUP(numImages, threadsX);
blocksY = imgSize;
// printf("boundary problems: %u\n", numImages % threadsX != 0);
}
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
        // this really shouldn't happen and I've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "supersample: not enough shared memory!");
exit(EXIT_FAILURE);
}
dim3 grid(blocksX, blocksY);
dim3 threads(threadsX, threadsY);
// printf("blocks: %dx%d, threads: %dx%d\n", blocksY, blocksX, threadsY, threadsX);
// printf("using %dx%d = %d bytes of shmem\n", shmemY, shmemX, shmem);
if(!trans) {
if(!useLoopy) {
if(factor == 2) {
kSupersampleMedium<2><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 3) {
kSupersampleMedium<3><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 4) {
kSupersampleMedium<4><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 5) {
kSupersampleMedium<5><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 6) {
kSupersampleMedium<6><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 7) {
kSupersampleMedium<7><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 8) {
kSupersampleMedium<8><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 9) {
kSupersampleMedium<9><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 10) {
kSupersampleMedium<10><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 11) {
kSupersampleMedium<11><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 12) {
kSupersampleMedium<12><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 13) {
kSupersampleMedium<13><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 14) {
kSupersampleMedium<14><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 15) {
kSupersampleMedium<15><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 16) {
kSupersampleMedium<16><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
}
} else {
if(factor == 2) {
kSupersampleMediumLoopy<2><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 3) {
kSupersampleMediumLoopy<3><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 4) {
kSupersampleMediumLoopy<4><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 5) {
kSupersampleMediumLoopy<5><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 6) {
kSupersampleMediumLoopy<6><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 7) {
kSupersampleMediumLoopy<7><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 8) {
kSupersampleMediumLoopy<8><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 9) {
kSupersampleMediumLoopy<9><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 10) {
kSupersampleMediumLoopy<10><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 11) {
kSupersampleMediumLoopy<11><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 12) {
kSupersampleMediumLoopy<12><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 13) {
kSupersampleMediumLoopy<13><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 14) {
kSupersampleMediumLoopy<14><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 15) {
kSupersampleMediumLoopy<15><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
} else if(factor == 16) {
kSupersampleMediumLoopy<16><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize);
}
}
} else {
if(!useLoopy) {
if(factor == 2) {
kSupersampleMediumTrans<2><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 3) {
kSupersampleMediumTrans<3><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 4) {
kSupersampleMediumTrans<4><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 5) {
kSupersampleMediumTrans<5><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 6) {
kSupersampleMediumTrans<6><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 7) {
kSupersampleMediumTrans<7><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 8) {
kSupersampleMediumTrans<8><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 9) {
kSupersampleMediumTrans<9><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 10) {
kSupersampleMediumTrans<10><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 11) {
kSupersampleMediumTrans<11><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 12) {
kSupersampleMediumTrans<12><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 13) {
kSupersampleMediumTrans<13><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 14) {
kSupersampleMediumTrans<14><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 15) {
kSupersampleMediumTrans<15><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 16) {
kSupersampleMediumTrans<16><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
}
} else {
if(factor == 2) {
kSupersampleMediumTransLoopy<2><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 3) {
kSupersampleMediumTransLoopy<3><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 4) {
kSupersampleMediumTransLoopy<4><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 5) {
kSupersampleMediumTransLoopy<5><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 6) {
kSupersampleMediumTransLoopy<6><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 7) {
kSupersampleMediumTransLoopy<7><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 8) {
kSupersampleMediumTransLoopy<8><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 9) {
kSupersampleMediumTransLoopy<9><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 10) {
kSupersampleMediumTransLoopy<10><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 11) {
kSupersampleMediumTransLoopy<11><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 12) {
kSupersampleMediumTransLoopy<12><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 13) {
kSupersampleMediumTransLoopy<13><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 14) {
kSupersampleMediumTransLoopy<14><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 15) {
kSupersampleMediumTransLoopy<15><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
} else if(factor == 16) {
kSupersampleMediumTransLoopy<16><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), numImages*imgSize, imgSize, shmemX);
}
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaThreadSynchronize());
}
void _gtm(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts, bool reverse) {
assert(!images->isTrans());
int imgPixels = reverse ? targets->getNumCols() : images->getNumCols();
int numImages = reverse ? targets->getNumRows() : images->getNumRows();
// printf("images: %dx%d\n", images->getNumRows(), images->getNumCols());
// printf("targets: %dx%d\n", targets->getNumRows(), targets->getNumCols());
// printf("imgPixels: %d\n", imgPixels);
assert(sqrt(float(imgPixels)) == floor(sqrt(float(imgPixels))));
int imgSize = sqrt(imgPixels);
assert(squareSize > 1 && squareSize <= 16);
assert(imgSize > 0 && imgSize <= 512);
// assert(squareSize * imgSize <= 512);
assert(imgSize % squareSize == 0);
assert(imgSize > squareSize);
assert(targets->getNumElements() == images->getNumElements());
bool useLoopy = false;
int SHMEM_MAX = 8192; // don't use more than this much shmem
int THREADS_MAX = 512;
int threadsX = imgSize;
int threadsY = squareSize * MIN(THREADS_MAX / (squareSize*threadsX), SHMEM_MAX / (4*threadsX*squareSize)); // to avoid running out of shmem
if (threadsY == 0) {
threadsX = 16;
threadsY = squareSize * MIN(THREADS_MAX / (squareSize*threadsX), SHMEM_MAX / (4*threadsX*squareSize));
useLoopy = true;
// printf("using loopy\n");
}
int shmemX = squareSize;
int shmemPadX = avoidBankConflicts * (1 - (shmemX % 2));
shmemX += shmemPadX;
int shmemY = threadsX * (threadsY / squareSize);
int loopsYPerBlock = useLoopy ? GTM_LOOPY_BLOCK_LOOPS_Y : GTM_BLOCK_LOOPS_Y;
int blocksX = imgSize;
int blocksY = DIVUP(numImages, loopsYPerBlock * threadsY);
// printf("boundary problems: %u\n", numImages % (loopsYPerBlock*threadsY) != 0);
int shmem = 4 * shmemX * shmemY;
if (shmem == 0 || shmem > 16300) {
        // this really shouldn't happen and I've only put this here as a precautionary measure
// to avoid getting mysteriously wrong results.
fprintf(stderr, "_gtm: not enough shared memory!");
exit(EXIT_FAILURE);
}
dim3 grid(blocksX, blocksY);
dim3 threads(threadsX, threadsY);
// printf("blocks: %dx%d, threads: %dx%d\n", blocksY, blocksX, threadsY, threadsX);
// printf("using %dx%d = %d bytes of shmem\n", shmemY, shmemX, shmem);
if(reverse) {
if(!useLoopy) {
if(squareSize == 2) {
kGridToMatrix<2, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
kGridToMatrix<3, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
kGridToMatrix<4, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
kGridToMatrix<5, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
kGridToMatrix<6, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
kGridToMatrix<7, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
kGridToMatrix<8, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
kGridToMatrix<9, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
kGridToMatrix<10, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
kGridToMatrix<11, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
kGridToMatrix<12, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
kGridToMatrix<13, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
kGridToMatrix<14, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
kGridToMatrix<15, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
kGridToMatrix<16, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
} else {
if(squareSize == 2) {
kGridToMatrixLoopy<2, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
kGridToMatrixLoopy<3, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
kGridToMatrixLoopy<4, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
kGridToMatrixLoopy<5, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
kGridToMatrixLoopy<6, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
kGridToMatrixLoopy<7, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
kGridToMatrixLoopy<8, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
kGridToMatrixLoopy<9, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
kGridToMatrixLoopy<10, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
kGridToMatrixLoopy<11, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
kGridToMatrixLoopy<12, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
kGridToMatrixLoopy<13, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
kGridToMatrixLoopy<14, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
kGridToMatrixLoopy<15, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
kGridToMatrixLoopy<16, true><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
}
} else {
if(!useLoopy) {
if(squareSize == 2) {
kGridToMatrix<2, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
kGridToMatrix<3, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
kGridToMatrix<4, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
kGridToMatrix<5, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
kGridToMatrix<6, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
kGridToMatrix<7, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
kGridToMatrix<8, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
kGridToMatrix<9, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
kGridToMatrix<10, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
kGridToMatrix<11, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
kGridToMatrix<12, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
kGridToMatrix<13, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
kGridToMatrix<14, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
kGridToMatrix<15, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
kGridToMatrix<16, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
} else {
if(squareSize == 2) {
kGridToMatrixLoopy<2, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 3) {
kGridToMatrixLoopy<3, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 4) {
kGridToMatrixLoopy<4, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 5) {
kGridToMatrixLoopy<5, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 6) {
kGridToMatrixLoopy<6, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 7) {
kGridToMatrixLoopy<7, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 8) {
kGridToMatrixLoopy<8, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 9) {
kGridToMatrixLoopy<9, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 10) {
kGridToMatrixLoopy<10, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 11) {
kGridToMatrixLoopy<11, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 12) {
kGridToMatrixLoopy<12, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 13) {
kGridToMatrixLoopy<13, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 14) {
kGridToMatrixLoopy<14, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 15) {
kGridToMatrixLoopy<15, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
} else if(squareSize == 16) {
kGridToMatrixLoopy<16, false><<<grid, threads, shmem>>>(images->getDevData(), targets->getDevData(), imgSize, numImages*imgSize, shmemX);
}
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaDeviceSynchronize());
}
void gridToMatrix(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts) {
_gtm(images, targets, squareSize, avoidBankConflicts, false);
}
void matrixToGrid(NVMatrix* images, NVMatrix* targets, int squareSize, bool avoidBankConflicts) {
_gtm(images, targets, squareSize, avoidBankConflicts, true);
}
/*
* Samples from a bunch of multinomial distributions, where each row of the "multi" matrix
* is a different distribution. Of course, each row of the "multi" matrix must sum to 1.
*
* It's optimized for the case when you want to sample from lots (hundreds of thousands)
* of fairly small multinomial distributions.
*
* The case when the multinomials are in columns is much easier and faster.
*/
void sampleMultinomial(NVMatrix* multi, NVMatrix* randoms, NVMatrix* targets) {
assert(!multi->isTrans());
assert(multi->isSameDims(*targets));
assert(multi->getNumCols() <= 1024);
assert(randoms->getNumElements() == multi->getNumRows());
int nomials = multi->getNumCols();
int multinomials = multi->getNumRows();
if(nomials > 256 || multinomials < 8192) {
/*
* I'm really not sure about the merits of this tree-based function. I may
* remove it in the future. It's faster in some cases (e.g. when the number of
* multinomials is small and the multinomials are very large), but you can get
* similar performance from the non-tree-based one by reducing the number of
* y-loops.
*/
dim3 grid(1, DIVUP(multinomials, 1));
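// keep grid.y within the hardware block-count limit: halve it and double grid.x until it fits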
while (grid.y > NUM_BLOCKS_MAX) {
grid.y = DIVUP(grid.y, 2);
grid.x *= 2;
}
// printf("grid: %dx%d\n", grid.x, grid.y);
if(nomials <= 64) { // yes i know this can't happen under current conditions
dim3 threads(32, 1);
kSampleMultinomial<32><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 128) {
dim3 threads(64, 1);
kSampleMultinomial<64><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 256) {
dim3 threads(128, 1);
kSampleMultinomial<128><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else if(nomials <= 512) {
dim3 threads(256, 1);
kSampleMultinomial<256><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
} else {
dim3 threads(512, 1);
kSampleMultinomial<512><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(), nomials, multinomials);
}
} else {
dim3 grid(1,DIVUP(multinomials, SSM_THREADS_Y*SSM_LOOPS_Y));
dim3 threads(SSM_THREADS_X, SSM_THREADS_Y);
while (grid.y > NUM_BLOCKS_MAX) {
grid.y = DIVUP(grid.y, 2);
grid.x *= 2;
}
if(nomials <= 16) {
if(nomials <= 4) {
kSampleSmallMultinomial<1, 4><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 8) {
kSampleSmallMultinomial<1, 8><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 12) {
kSampleSmallMultinomial<1, 12><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else {
kSampleSmallMultinomial<1, 16><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
}
} else if(nomials <= 32) {
kSampleSmallMultinomial<2, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 48){
kSampleSmallMultinomial<3, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 64){
kSampleSmallMultinomial<4, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 80){
kSampleSmallMultinomial<5, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 96){
kSampleSmallMultinomial<6, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 112){
kSampleSmallMultinomial<7, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 128){
kSampleSmallMultinomial<8, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 144){
kSampleSmallMultinomial<9, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 160){
kSampleSmallMultinomial<10, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 176){
kSampleSmallMultinomial<11, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 192){
kSampleSmallMultinomial<12, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 208){
kSampleSmallMultinomial<13, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 224){
kSampleSmallMultinomial<14, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 240){
kSampleSmallMultinomial<15, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
} else if(nomials <= 256){
kSampleSmallMultinomial<16, SSM_THREADS_X><<<grid, threads>>>(multi->getDevData(), randoms->getDevData(), targets->getDevData(),nomials, multinomials);
}
}
/*cutilCheckMsg("kernel execution failed");*/
cuvSafeCall(cudaDeviceSynchronize());
}
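// --- Illustrative CPU reference (added sketch, not part of the original kernels) ---
// Inverse-CDF sampling of one outcome per row: walk the row's cumulative sum until it
// passes the row's uniform random draw. This is assumed to be the scheme the
// kSampleMultinomial kernels implement in parallel; the one-hot output layout is an
// assumption for illustration only.
static void sampleMultinomialReference(const float* multi, const float* randoms, float* targets,
                                       int nomials, int multinomials) {
    for (int m = 0; m < multinomials; m++) {
        float cumulative = 0.0f;
        int chosen = nomials - 1; // fall back to the last outcome if rounding leaves a gap
        for (int k = 0; k < nomials; k++) {
            cumulative += multi[m * nomials + k];
            if (randoms[m] < cumulative) { chosen = k; break; }
        }
        for (int k = 0; k < nomials; k++) {
            targets[m * nomials + k] = (k == chosen) ? 1.0f : 0.0f;
        }
    }
}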
|
9bb699f587844266f3cdfc44c6c22c90dbca3f4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Overlap Coefficient core functionality
*
* @file overlap.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.h>
#include "graph.hpp"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
// TODO: Identical kernel to jaccard_row_sum!!
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_row_sum(
vertex_t n, edge_t const *csrPtr, vertex_t const *csrInd, weight_t const *v, weight_t *work)
{
vertex_t row;
edge_t start, end, length;
weight_t sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
// compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0) work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// TODO: Nearly identical to jaccard_is; only weight_s differs (min of the two row volumes instead of their sum)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_is(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, j, Ni, Nj;
vertex_t row, col;
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[j] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[j], ref_val); }
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
// NOTE: NOT the same as jaccard
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is_pairs(edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[idx] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[idx], ref_val); }
}
}
}
// Overlap weights (*weight)
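// weight_j = weight_i / weight_s = |N(u) ∩ N(v)| / min(|N(u)|, |N(v)|), i.e. the
// Szymkiewicz-Simpson overlap coefficient; weight_s was set to the min of the two row
// volumes in overlap_is / overlap_is_pairs above.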
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_jw(edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
edge_t j;
weight_t Wi, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Wu = weight_s[j];
weight_j[j] = (Wi / Wu);
}
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap(vertex_t n,
edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( overlap_row_sum<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work);
hipDeviceSynchronize();
fill(e, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
hipLaunchKernelGGL(( overlap_is<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( overlap_jw<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, e, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( overlap_row_sum<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work);
hipDeviceSynchronize();
fill(num_pairs, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
hipLaunchKernelGGL(( overlap_is_pairs<weighted, vertex_t, edge_t, weight_t>), dim3(nblocks), dim3(nthreads), 0, 0,
num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( overlap_jw<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void overlap(experimental::GraphCSRView<VT, ET, WT> const &graph, WT const *weights, WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template <typename VT, typename ET, typename WT>
void overlap_list(experimental::GraphCSRView<VT, ET, WT> const &graph,
WT const *weights,
ET num_pairs,
VT const *first,
VT const *second,
WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter: first column is NULL");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter: second column is NULL");
rmm::device_vector<WT> weight_i(num_pairs);
rmm::device_vector<WT> weight_s(num_pairs);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap_pairs<false, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap_pairs<true, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template void overlap<int32_t, int32_t, float>(
experimental::GraphCSRView<int32_t, int32_t, float> const &, float const *, float *);
template void overlap<int32_t, int32_t, double>(
experimental::GraphCSRView<int32_t, int32_t, double> const &, double const *, double *);
template void overlap<int64_t, int64_t, float>(
experimental::GraphCSRView<int64_t, int64_t, float> const &, float const *, float *);
template void overlap<int64_t, int64_t, double>(
experimental::GraphCSRView<int64_t, int64_t, double> const &, double const *, double *);
template void overlap_list<int32_t, int32_t, float>(
experimental::GraphCSRView<int32_t, int32_t, float> const &,
float const *,
int32_t,
int32_t const *,
int32_t const *,
float *);
template void overlap_list<int32_t, int32_t, double>(
experimental::GraphCSRView<int32_t, int32_t, double> const &,
double const *,
int32_t,
int32_t const *,
int32_t const *,
double *);
template void overlap_list<int64_t, int64_t, float>(
experimental::GraphCSRView<int64_t, int64_t, float> const &,
float const *,
int64_t,
int64_t const *,
int64_t const *,
float *);
template void overlap_list<int64_t, int64_t, double>(
experimental::GraphCSRView<int64_t, int64_t, double> const &,
double const *,
int64_t,
int64_t const *,
int64_t const *,
double *);
} // namespace cugraph
| 9bb699f587844266f3cdfc44c6c22c90dbca3f4a.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Overlap Coefficient core functionality
*
* @file overlap.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.h>
#include "graph.hpp"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
// TODO: Identical kernel to jaccard_row_sum!!
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_row_sum(
vertex_t n, edge_t const *csrPtr, vertex_t const *csrInd, weight_t const *v, weight_t *work)
{
vertex_t row;
edge_t start, end, length;
weight_t sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
// compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0) work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// TODO: Nearly identical to jaccard_is; only weight_s differs (min of the two row volumes instead of their sum)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_is(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, j, Ni, Nj;
vertex_t row, col;
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[j] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[j], ref_val); }
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
// NOTE: NOT the same as jaccard
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is_pairs(edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[idx] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[idx], ref_val); }
}
}
}
// Overlap weights (*weight)
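// weight_j = weight_i / weight_s = |N(u) ∩ N(v)| / min(|N(u)|, |N(v)|), i.e. the
// Szymkiewicz-Simpson overlap coefficient; weight_s was set to the min of the two row
// volumes in overlap_is / overlap_is_pairs above.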
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) overlap_jw(edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
edge_t j;
weight_t Wi, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Wu = weight_s[j];
weight_j[j] = (Wi / Wu);
}
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap(vertex_t n,
edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
overlap_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
fill(e, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
overlap_is<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
overlap_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(e, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
overlap_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
fill(num_pairs, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
overlap_is_pairs<weighted, vertex_t, edge_t, weight_t><<<nblocks, nthreads>>>(
num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
overlap_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void overlap(experimental::GraphCSRView<VT, ET, WT> const &graph, WT const *weights, WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template <typename VT, typename ET, typename WT>
void overlap_list(experimental::GraphCSRView<VT, ET, WT> const &graph,
WT const *weights,
ET num_pairs,
VT const *first,
VT const *second,
WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter: result pointer is NULL");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter: first column is NULL");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter: second column is NULL");
rmm::device_vector<WT> weight_i(num_pairs);
rmm::device_vector<WT> weight_s(num_pairs);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap_pairs<false, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap_pairs<true, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template void overlap<int32_t, int32_t, float>(
experimental::GraphCSRView<int32_t, int32_t, float> const &, float const *, float *);
template void overlap<int32_t, int32_t, double>(
experimental::GraphCSRView<int32_t, int32_t, double> const &, double const *, double *);
template void overlap<int64_t, int64_t, float>(
experimental::GraphCSRView<int64_t, int64_t, float> const &, float const *, float *);
template void overlap<int64_t, int64_t, double>(
experimental::GraphCSRView<int64_t, int64_t, double> const &, double const *, double *);
template void overlap_list<int32_t, int32_t, float>(
experimental::GraphCSRView<int32_t, int32_t, float> const &,
float const *,
int32_t,
int32_t const *,
int32_t const *,
float *);
template void overlap_list<int32_t, int32_t, double>(
experimental::GraphCSRView<int32_t, int32_t, double> const &,
double const *,
int32_t,
int32_t const *,
int32_t const *,
double *);
template void overlap_list<int64_t, int64_t, float>(
experimental::GraphCSRView<int64_t, int64_t, float> const &,
float const *,
int64_t,
int64_t const *,
int64_t const *,
float *);
template void overlap_list<int64_t, int64_t, double>(
experimental::GraphCSRView<int64_t, int64_t, double> const &,
double const *,
int64_t,
int64_t const *,
int64_t const *,
double *);
} // namespace cugraph
|
d23a1c875d64272dc98d01ad98d1f4cd2abcb32f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2016, Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is not permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
Co-Author: Darren Rambaud
*/
#include <cstdlib>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "cs43805351.h"
#include <math.h>
static const int ThreadsPerBlock = 512;
static const float Delta = 0.005491;
static const float xMid = 0.745796;
static const float yMid = 0.105089;
static __global__
void FractalKernel(const int frames, const int width, unsigned char pic[])
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < frames * (width * width)) {
const int col = idx % width;
const int row = (idx / width) % width;
const int frame = idx / (width * width);
const float myDelta = Delta * pow(0.99, frame+1); // per-frame zoom computed directly from the frame index (no loop-carried dependence)
const float xMin = xMid - myDelta;
const float yMin = yMid - myDelta;
const float dw = 2.0 * myDelta / width;
// compute a single pixel for this (frame, row, col)
if (row < width) { // bounds checking, ensures no wasted calc
const float cy = -yMin - row * dw;
if (col < width) { // bounds checking
const float cx = -xMin - col * dw;
float x = cx;
float y = cy;
int depth = 256;
float x2, y2;
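// escape-time iteration: z <- z*z + c with z = x + i*y and c = cx + i*cy, repeated until
// |z|^2 reaches the bailout value 5.0 or the depth budget runs out; the remaining depth is the shade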
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] \
= (unsigned char)depth;
}
}
}
}
int main(int argc, char *argv[])
{
printf("Fractal v1.5 [CUDA]\n");
// check command line
if (argc != 3) {
fprintf(stderr, "usage: %s frame_width num_frames\n", \
argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 10) {
fprintf(stderr, "error: frame_width must be at least 10\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
fprintf(stderr, "error: num_frames must be at least 1\n");
exit(-1);
}
printf("computing %d frames of %d by %d fractal\n", \
frames, width, width);
// allocate picture array
unsigned char* pic = new unsigned char[frames * width * width];
unsigned char* pic_d;
if (hipSuccess != \
hipMalloc((void **)&pic_d, frames * width * width * \
sizeof(unsigned char))) {
fprintf(stderr, "could not allocate memory\n");
exit(-1);
}
// start time
struct timeval start, end;
gettimeofday(&start, NULL);
// compute frames
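// one thread per pixel per frame; the block count is rounded up so every pixel is covered
// (the kernel's bounds check discards the padding threads)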
hipLaunchKernelGGL(( FractalKernel), \
(frames * width * width + (ThreadsPerBlock - 1)) / ThreadsPerBlock, \
ThreadsPerBlock, 0, 0, frames, width, pic_d);
if (hipSuccess != \
hipMemcpy(pic, pic_d, frames * width * width * \
sizeof(unsigned char), hipMemcpyDeviceToHost)) {
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - \
start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing frames to BMP files
if ((width <= 400) && (frames <= 30)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "fractal%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
delete [] pic;
hipFree(pic_d);
return 0;
}
| d23a1c875d64272dc98d01ad98d1f4cd2abcb32f.cu | /*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2016, Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is not permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
Co-Author: Darren Rambaud
*/
#include <cstdlib>
#include <sys/time.h>
#include <cuda.h>
#include "cs43805351.h"
#include <math.h>
static const int ThreadsPerBlock = 512;
static const float Delta = 0.005491;
static const float xMid = 0.745796;
static const float yMid = 0.105089;
static __global__
void FractalKernel(const int frames, const int width, unsigned char pic[])
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < frames * (width * width)) {
const int col = idx % width;
const int row = (idx / width) % width;
const int frame = idx / (width * width);
const float myDelta = Delta * pow(0.99, frame+1); //loop dep fixed
const float xMin = xMid - myDelta;
const float yMin = yMid - myDelta;
const float dw = 2.0 * myDelta / width;
// compute a single pixel for this (frame, row, col)
if (row < width) { // bounds checking, ensures no wasted calc
const float cy = -yMin - row * dw;
if (col < width) { // bounds checking
const float cx = -xMin - col * dw;
float x = cx;
float y = cy;
int depth = 256;
float x2, y2;
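// escape-time iteration: z <- z*z + c with z = x + i*y and c = cx + i*cy, repeated until
// |z|^2 reaches the bailout value 5.0 or the depth budget runs out; the remaining depth is the shade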
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] \
= (unsigned char)depth;
}
}
}
}
int main(int argc, char *argv[])
{
printf("Fractal v1.5 [CUDA]\n");
// check command line
if (argc != 3) {
fprintf(stderr, "usage: %s frame_width num_frames\n", \
argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 10) {
fprintf(stderr, "error: frame_width must be at least 10\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
fprintf(stderr, "error: num_frames must be at least 1\n");
exit(-1);
}
printf("computing %d frames of %d by %d fractal\n", \
frames, width, width);
// allocate picture array
unsigned char* pic = new unsigned char[frames * width * width];
unsigned char* pic_d;
if (cudaSuccess != \
cudaMalloc((void **)&pic_d, frames * width * width * \
sizeof(unsigned char))) {
fprintf(stderr, "could not allocate memory\n");
exit(-1);
}
// start time
struct timeval start, end;
gettimeofday(&start, NULL);
// compute frames
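// one thread per pixel per frame; the block count is rounded up so every pixel is covered
// (the kernel's bounds check discards the padding threads)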
FractalKernel<<<\
(frames * width * width + (ThreadsPerBlock - 1)) / ThreadsPerBlock, \
ThreadsPerBlock>>>(frames, width, pic_d);
if (cudaSuccess != \
cudaMemcpy(pic, pic_d, frames * width * width * \
sizeof(unsigned char), cudaMemcpyDeviceToHost)) {
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - \
start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing frames to BMP files
if ((width <= 400) && (frames <= 30)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "fractal%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
delete [] pic;
cudaFree(pic_d);
return 0;
}
|
0f2de219ca324f3a2b9e382f52062ef2892292e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <ctime>
#include <random> // for c++11 random number generation on host
#include <cstdio>
#include <cstdlib> // for min/max
#include <hiprand/hiprand_kernel.h> // random number
#include <hip/device_functions.h>
#include <time.h>
#include <fstream>
#include <string>
#include <numeric>
#include <hiprand/hiprand.h>
#define N 500 // curand_state objects or grid size
using namespace std;
//debug outputs
#define CUDA_KERNEL_DEBUG 0 //test for illegal memory access
#define OUTPUT_PRE 1 // preprocess debug output
#define OUTPUT_POST 1 //postprocess debug output
// Error wrapper
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess) {
std::cout << "GPUassert: " << hipGetErrorString(code) << " / " << file << " " << line << std::endl;
//fcout << stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
else {
if (CUDA_KERNEL_DEBUG == 1) {
std::cout << "success GPUassert: " << hipGetErrorString(code) << " / " << file << " " << line << std::endl;
}
}
}
//initialize random numbers
__device__ void init( hiprandState_t* states) {
int Idx = blockIdx.x *blockDim.x + threadIdx.x; // each core has threads of random numbers
hiprand_init(clock(), Idx, 0, &states[Idx]);
}
//__device__ float hiprand_uniform(hiprandState_t *states) {
//
// return hiprand_uniform(&states);
//
//};
__global__ void randoms( hiprandState_t* states, float* num)
{
int Idx = blockIdx.x *blockDim.x + threadIdx.x;
if (Idx >= N) return; // guard: the rounded-up grid can launch a few threads past the N states/outputs
init(states);
num[Idx] = hiprand_uniform(&states[Idx]);
}
int main()
{
dim3 dimblock; //threads
dim3 dimgrid; // blocks
hiprandState_t* states;
hipMalloc((void**)&states, N * sizeof(hiprandState_t));
float cpu_nums[N];
float* gpu_nums;
hipMalloc((void**) &gpu_nums, N * sizeof(float));
//invoke random number kernel: one thread per element, with flat 1-D indexing to match the kernel
dimblock.x = 256; dimblock.y = 1; dimblock.z = 1; // block of threads
dimgrid.x = (N + dimblock.x - 1) / dimblock.x; dimgrid.y = 1; dimgrid.z = 1; // grid of blocks
randoms << < dimgrid, dimblock >> > (states, gpu_nums);
hipMemcpy(cpu_nums, gpu_nums, N * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
cout << "the " << i << " numbers is: " << cpu_nums[i] << endl;
}
hipFree(states);
hipFree(gpu_nums);
cin.get();
return 0;
} | 0f2de219ca324f3a2b9e382f52062ef2892292e9.cu |
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <ctime>
#include <random> // for c++11 random number generation on host
#include <cstdio>
#include <cstdlib> // for min/max
#include <curand_kernel.h> // random number
#include <device_functions.h>
#include <time.h>
#include <fstream>
#include <string>
#include <numeric>
#include <curand.h>
#define N 500 // curand_state objects or grid size
using namespace std;
//debug outputs
#define CUDA_KERNEL_DEBUG 0 //test for illegal memory access
#define OUTPUT_PRE 1 // preprocess debug output
#define OUTPUT_POST 1 //postprocess debug output
// Error wrapper
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess) {
std::cout << "GPUassert: " << cudaGetErrorString(code) << " / " << file << " " << line << std::endl;
//fcout << stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
else {
if (CUDA_KERNEL_DEBUG == 1) {
std::cout << "success GPUassert: " << cudaGetErrorString(code) << " / " << file << " " << line << std::endl;
}
}
}
//initialize random numbers
__device__ void init( curandState_t* states) {
int Idx = blockIdx.x *blockDim.x + threadIdx.x; // each core has threads of random numbers
curand_init(clock(), Idx, 0, &states[Idx]);
}
//__device__ float curand_uniform(curandState_t *states) {
//
// return curand_uniform(&states);
//
//};
__global__ void randoms( curandState_t* states, float* num)
{
int Idx = blockIdx.x *blockDim.x + threadIdx.x;
if (Idx >= N) return; // guard: the rounded-up grid can launch a few threads past the N states/outputs
init(states);
num[Idx] = curand_uniform(&states[Idx]);
}
int main()
{
dim3 dimblock; //threads
dim3 dimgrid; // blocks
curandState_t* states;
cudaMalloc((void**)&states, N * sizeof(curandState_t));
float cpu_nums[N];
float* gpu_nums;
cudaMalloc((void**) &gpu_nums, N * sizeof(float));
//invoke random number kernel: one thread per element, with flat 1-D indexing to match the kernel
dimblock.x = 256; dimblock.y = 1; dimblock.z = 1; // block of threads
dimgrid.x = (N + dimblock.x - 1) / dimblock.x; dimgrid.y = 1; dimgrid.z = 1; // grid of blocks
randoms << < dimgrid, dimblock >> > (states, gpu_nums);
cudaMemcpy(cpu_nums, gpu_nums, N * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
cout << "the " << i << " numbers is: " << cpu_nums[i] << endl;
}
cudaFree(states);
cudaFree(gpu_nums);
cin.get();
return 0;
} |
ebdd4a86c4567694cca558d7f65ee0c08624e17c.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 256
// A C model derived from the OpenCL kernel
void softMax_cpu(const int numSlice, const int sliceSize, const float* src, float* dest) {
for (int i = 0; i < numSlice; i++) {
float max_ = src[i * sliceSize];
for (int j = 0; j < sliceSize; j++) {
max_ = (max_ < src[i * sliceSize + j]) ? src[i * sliceSize + j] : max_;
}
float sum = 0;
for (int j = 0; j < sliceSize; j++) {
float e = expf(src[i * sliceSize + j] - max_);
sum += e;
dest[i * sliceSize + j] = e;
}
for (int j = 0; j < sliceSize; j++) {
dest[i * sliceSize + j] /= sum;
}
}
}
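// Both the host reference above and the kernel below subtract the per-slice maximum before
// exponentiating, the usual trick that keeps expf in range without changing the normalized result.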
__global__
void softMax (const int numSlice, const int sliceSize,
const float* src, float* dest)
{
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numSlice) return;
float max_ = src[i * sliceSize];
for (int j = 0; j < sliceSize; j++) {
max_ = max(max_, src[i * sliceSize + j]);
}
float sum = 0;
for (int j = 0; j < sliceSize; j++) {
sum += expf(src[i * sliceSize + j] - max_);
}
for (int j = 0; j < sliceSize; j++) {
dest[i * sliceSize + j] = expf(src[i * sliceSize + j] - max_) / sum;
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <number of slices> <slice size> <repeat>\n", argv[0]);
return 1;
}
int numSlice = atoi(argv[1]);
int sliceSize = atoi(argv[2]);
int repeat = atoi(argv[3]);
int numElem = numSlice * sliceSize;
float* input = (float*) aligned_alloc(1024, sizeof(float) * numElem);
float* output_gpu = (float*) aligned_alloc(1024, sizeof(float) * numElem);
float* output_cpu = (float*) aligned_alloc(1024, sizeof(float) * numElem);
srand(2);
for (int i = 0; i < numSlice; i++)
for (int j = 0; j < sliceSize; j++)
input[i*sliceSize+j] = rand() % 13;
float *d_input, *d_output;
hipMalloc((void**)&d_input, sizeof(float) * numElem);
hipMalloc((void**)&d_output, sizeof(float) * numElem);
hipMemcpy(d_input, input, sizeof(float) * numElem, hipMemcpyHostToDevice);
dim3 global_work_size ((numSlice+BLOCK_SIZE-1)/BLOCK_SIZE*BLOCK_SIZE);
dim3 local_work_size (BLOCK_SIZE);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++) {
hipLaunchKernelGGL(( softMax), dim3(global_work_size), dim3(local_work_size), 0, 0, numSlice, sliceSize, d_input, d_output);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(output_gpu, d_output, sizeof(float) * numElem, hipMemcpyDeviceToHost);
// verification
bool ok = true;
softMax_cpu(numSlice, sliceSize, input, output_cpu);
for (int i = 0; i < numElem; i++) {
if (fabsf(output_cpu[i] - output_gpu[i]) > 1e-3) {
printf("@index %d host: %f device: %f\n", i, output_cpu[i], output_gpu[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
free(input);
free(output_cpu);
free(output_gpu);
hipFree(d_input);
hipFree(d_output);
return 0;
}
| ebdd4a86c4567694cca558d7f65ee0c08624e17c.cu | #include <chrono>
#include <cstdlib>
#include <cstdio>
#include <cuda.h>
#define BLOCK_SIZE 256
// A C model derived from the OpenCL kernel
void softMax_cpu(const int numSlice, const int sliceSize, const float* src, float* dest) {
for (int i = 0; i < numSlice; i++) {
float max_ = src[i * sliceSize];
for (int j = 0; j < sliceSize; j++) {
max_ = (max_ < src[i * sliceSize + j]) ? src[i * sliceSize + j] : max_;
}
float sum = 0;
for (int j = 0; j < sliceSize; j++) {
float e = expf(src[i * sliceSize + j] - max_);
sum += e;
dest[i * sliceSize + j] = e;
}
for (int j = 0; j < sliceSize; j++) {
dest[i * sliceSize + j] /= sum;
}
}
}
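// Both the host reference above and the kernel below subtract the per-slice maximum before
// exponentiating, the usual trick that keeps expf in range without changing the normalized result.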
__global__
void softMax (const int numSlice, const int sliceSize,
const float* src, float* dest)
{
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numSlice) return;
float max_ = src[i * sliceSize];
for (int j = 0; j < sliceSize; j++) {
max_ = max(max_, src[i * sliceSize + j]);
}
float sum = 0;
for (int j = 0; j < sliceSize; j++) {
sum += expf(src[i * sliceSize + j] - max_);
}
for (int j = 0; j < sliceSize; j++) {
dest[i * sliceSize + j] = expf(src[i * sliceSize + j] - max_) / sum;
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <number of slices> <slice size> <repeat>\n", argv[0]);
return 1;
}
int numSlice = atoi(argv[1]);
int sliceSize = atoi(argv[2]);
int repeat = atoi(argv[3]);
int numElem = numSlice * sliceSize;
float* input = (float*) aligned_alloc(1024, sizeof(float) * numElem);
float* output_gpu = (float*) aligned_alloc(1024, sizeof(float) * numElem);
float* output_cpu = (float*) aligned_alloc(1024, sizeof(float) * numElem);
srand(2);
for (int i = 0; i < numSlice; i++)
for (int j = 0; j < sliceSize; j++)
input[i*sliceSize+j] = rand() % 13;
float *d_input, *d_output;
cudaMalloc((void**)&d_input, sizeof(float) * numElem);
cudaMalloc((void**)&d_output, sizeof(float) * numElem);
cudaMemcpy(d_input, input, sizeof(float) * numElem, cudaMemcpyHostToDevice);
dim3 global_work_size ((numSlice+BLOCK_SIZE-1)/BLOCK_SIZE*BLOCK_SIZE);
dim3 local_work_size (BLOCK_SIZE);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++) {
softMax<<<global_work_size, local_work_size>>>(numSlice, sliceSize, d_input, d_output);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(output_gpu, d_output, sizeof(float) * numElem, cudaMemcpyDeviceToHost);
// verification
bool ok = true;
softMax_cpu(numSlice, sliceSize, input, output_cpu);
for (int i = 0; i < numElem; i++) {
if (fabsf(output_cpu[i] - output_gpu[i]) > 1e-3) {
printf("@index %d host: %f device: %f\n", i, output_cpu[i], output_gpu[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
free(input);
free(output_cpu);
free(output_gpu);
cudaFree(d_input);
cudaFree(d_output);
return 0;
}
|
a030ab697ed56fa3c65176a0e23be9388de55f5f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The MIT License
*
* Copyright 2018 Ahmed Tarek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "util.h"
/**
 * Rectified Linear Unit (ReLU)
*/
extern "C"
__global__ void ReLU(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size && v[row] < 0.0f)
v[row] = 0.0f;
}
/**
 * Leaky Rectified Linear Unit
*/
extern "C"
__global__ void LeakyReLU(float* v, long size, float alpha) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size && v[row] < 0.0f)
v[row] = v[row] * alpha;
}
/**
* Sigmoid
*/
extern "C"
__global__ void Sigmoid(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = 1.0 / (1 + safe_exp(-v[row]));
}
/**
* Sin
*/
extern "C"
__global__ void Sin(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = sin(v[row]);
}
/**
* Tanh
*/
extern "C"
__global__ void Tanh(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = tanh(v[row]);
}
/**
* Step
*/
extern "C"
__global__ void Step(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = v[row] >= 0 ? 1 : 0;
}
/**
* SoftMax
*/
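/*
 * One thread block normalizes one slice of length `size` in place: threads
 * stride over the slice, a block-wide maximum is reduced, exp(x - max) is
 * written back while a block-wide sum is accumulated, and a last pass
 * divides by that sum (falling back to a uniform 1/size if the sum is zero).
 * reduceBlockMax/reduceBlockSum do a shared-memory tree reduction followed
 * by a warp-synchronous tail; the __syncwarp() calls separate the read and
 * write phases of each tail step so it stays correct under independent
 * thread scheduling (Volta and later).
 */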
template<const int BLOCK_SIZE>
__forceinline__
__device__ float reduceBlockMax(float array[BLOCK_SIZE], float value) {
unsigned tid = threadIdx.x;
array[tid] = value;
__syncthreads();
#pragma unroll
for(int i = BLOCK_SIZE / 2; i > 32; i /= 2) {
if(tid < i)
array[tid] = max(array[tid], array[tid + i]);
__syncthreads();
}
if(tid < 32) {
float v = array[tid];
if(BLOCK_SIZE >= 64) {
v = max(v, array[tid+32]); __syncwarp();
array[tid] = v; __syncwarp();
}
v = max(v, array[tid+16]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+8]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+4]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+2]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+1]); __syncwarp();
array[tid] = v;
}
__syncthreads();
return array[0];
}
template<const int BLOCK_SIZE>
__forceinline__
__device__ float reduceBlockSum(float array[BLOCK_SIZE], float value) {
unsigned tid = threadIdx.x;
array[tid] = value;
__syncthreads();
#pragma unroll
for(int i = BLOCK_SIZE / 2; i > 32; i /= 2) {
if(tid < i)
array[tid] += array[tid + i];
__syncthreads();
}
if(tid < 32) {
float v = array[tid];
if(BLOCK_SIZE >= 64) {
v += array[tid+32]; __syncwarp();
array[tid] = v; __syncwarp();
}
v += array[tid+16]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+8]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+4]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+2]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+1]; __syncwarp();
array[tid] = v;
}
__syncthreads();
return array[0];
}
template<const int BLOCK_SIZE>
__device__ void SoftMax(float* v, long size) {
__shared__ float temp[BLOCK_SIZE];
int blockIndex = blockIdx.x * size;
const int iterations = (size - 1) / BLOCK_SIZE + 1;
float tmax = FLOAT_MIN;
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
float value = v[blockIndex + index];
tmax = max(tmax, value);
}
}
/* get max over block */
float blockMax = reduceBlockMax<BLOCK_SIZE>(temp, tmax);
/* calculate exp */
float sum = 0;
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
float value = safe_exp(v[blockIndex + index] - blockMax);
sum += value;
v[blockIndex + index] = value;
}
}
/* get sum over block */
float blockSum = reduceBlockSum<BLOCK_SIZE>(temp, sum);
/* calculate final value and store */
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
if(blockSum == 0) {
v[blockIndex + index] = 1.0 / size;
} else
v[blockIndex + index] = v[blockIndex + index] / blockSum;
}
}
}
extern "C"
__global__ void SoftMax_32(float* v, long size) {
SoftMax<32>(v, size);
}
extern "C"
__global__ void SoftMax_64(float* v, long size) {
SoftMax<64>(v, size);
}
extern "C"
__global__ void SoftMax_128(float* v, long size) {
SoftMax<128>(v, size);
}
extern "C"
__global__ void SoftMax_256(float* v, long size) {
SoftMax<256>(v, size);
}
extern "C"
__global__ void SoftMax_512(float* v, long size) {
SoftMax<512>(v, size);
}
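/*
 * Host-side usage sketch (an assumption, not part of this file): for a
 * row-major [rows x cols] matrix d_m already on the device, launch one
 * block per row with the wrapper matching the chosen block size, e.g.
 *
 *   hipLaunchKernelGGL(SoftMax_256, dim3(rows), dim3(256), 0, 0, d_m, (long)cols);
 *
 * SoftMax_1/SoftMax_2 below are an alternative two-kernel path for one long
 * vector: SoftMax_1 exponentiates in place and emits a partial sum per
 * block, SoftMax_2 reduces those partial sums and divides. It relies on
 * safe_exp() clamping instead of subtracting the maximum first.
 */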
extern "C"
__global__ void SoftMax_1(float* v, long size, float* sums) {
__shared__ float cache[THREADS_PER_BLOCK];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row >= size)
return;
cache[threadIdx.x] = safe_exp(v[row]);
v[row] = cache[threadIdx.x];
__syncthreads();
sumBlock(cache, min((long)blockDim.x, size - blockDim.x * blockIdx.x));
/* each block creates a sum */
if(threadIdx.x == 0)
sums[blockIdx.x] = cache[0];
}
extern "C"
__global__ void SoftMax_2(float* v, long size, float* sums, long sums_size) {
__shared__ float cache[THREADS_PER_BLOCK];
int loops = calcIterations(blockDim.x, sums_size);
cache[threadIdx.x] = 0;
/*
* Calculate total sum from "sums" array
* should loop only once unless size > THREADS_PER_BLOCK^2
*/
for(int i=0; i < loops; i++) {
int index = i * blockDim.x + threadIdx.x;
if(index < sums_size)
cache[threadIdx.x] += sums[index];
}
__syncthreads();
sumBlock(cache, min((long)blockDim.x, sums_size));
__syncthreads();
double sum = cache[0];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
if(sum == 0) {
v[row] = 1.0 / size;
} else {
v[row] /= sum;
}
}
extern "C"
__global__ void empty_0() {
}
extern "C"
__global__ void empty_1(int i) {
}
| a030ab697ed56fa3c65176a0e23be9388de55f5f.cu | /*
* The MIT License
*
* Copyright 2018 Ahmed Tarek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "util.h"
/**
 * Rectified Linear Unit (ReLU)
*/
extern "C"
__global__ void ReLU(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size && v[row] < 0.0f)
v[row] = 0.0f;
}
/**
 * Leaky Rectified Linear Unit
*/
extern "C"
__global__ void LeakyReLU(float* v, long size, float alpha) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size && v[row] < 0.0f)
v[row] = v[row] * alpha;
}
/**
* Sigmoid
*/
extern "C"
__global__ void Sigmoid(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = 1.0 / (1 + safe_exp(-v[row]));
}
/**
* Sin
*/
extern "C"
__global__ void Sin(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = sin(v[row]);
}
/**
* Tanh
*/
extern "C"
__global__ void Tanh(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = tanh(v[row]);
}
/**
* Step
*/
extern "C"
__global__ void Step(float* v, long size) {
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
v[row] = v[row] >= 0 ? 1 : 0;
}
/**
* SoftMax
*/
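/*
 * One thread block normalizes one slice of length `size` in place: threads
 * stride over the slice, a block-wide maximum is reduced, exp(x - max) is
 * written back while a block-wide sum is accumulated, and a last pass
 * divides by that sum (falling back to a uniform 1/size if the sum is zero).
 * reduceBlockMax/reduceBlockSum do a shared-memory tree reduction followed
 * by a warp-synchronous tail; the __syncwarp() calls separate the read and
 * write phases of each tail step so it stays correct under independent
 * thread scheduling (Volta and later).
 */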
template<const int BLOCK_SIZE>
__forceinline__
__device__ float reduceBlockMax(float array[BLOCK_SIZE], float value) {
unsigned tid = threadIdx.x;
array[tid] = value;
__syncthreads();
#pragma unroll
for(int i = BLOCK_SIZE / 2; i > 32; i /= 2) {
if(tid < i)
array[tid] = max(array[tid], array[tid + i]);
__syncthreads();
}
if(tid < 32) {
float v = array[tid];
if(BLOCK_SIZE >= 64) {
v = max(v, array[tid+32]); __syncwarp();
array[tid] = v; __syncwarp();
}
v = max(v, array[tid+16]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+8]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+4]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+2]); __syncwarp();
array[tid] = v; __syncwarp();
v = max(v, array[tid+1]); __syncwarp();
array[tid] = v;
}
__syncthreads();
return array[0];
}
template<const int BLOCK_SIZE>
__forceinline__
__device__ float reduceBlockSum(float array[BLOCK_SIZE], float value) {
unsigned tid = threadIdx.x;
array[tid] = value;
__syncthreads();
#pragma unroll
for(int i = BLOCK_SIZE / 2; i > 32; i /= 2) {
if(tid < i)
array[tid] += array[tid + i];
__syncthreads();
}
if(tid < 32) {
float v = array[tid];
if(BLOCK_SIZE >= 64) {
v += array[tid+32]; __syncwarp();
array[tid] = v; __syncwarp();
}
v += array[tid+16]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+8]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+4]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+2]; __syncwarp();
array[tid] = v; __syncwarp();
v += array[tid+1]; __syncwarp();
array[tid] = v;
}
__syncthreads();
return array[0];
}
template<const int BLOCK_SIZE>
__device__ void SoftMax(float* v, long size) {
__shared__ float temp[BLOCK_SIZE];
int blockIndex = blockIdx.x * size;
const int iterations = (size - 1) / BLOCK_SIZE + 1;
float tmax = FLOAT_MIN;
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
float value = v[blockIndex + index];
tmax = max(tmax, value);
}
}
/* get max over block */
float blockMax = reduceBlockMax<BLOCK_SIZE>(temp, tmax);
/* calculate exp */
float sum = 0;
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
float value = safe_exp(v[blockIndex + index] - blockMax);
sum += value;
v[blockIndex + index] = value;
}
}
/* get sum over block */
float blockSum = reduceBlockSum<BLOCK_SIZE>(temp, sum);
/* calculate final value and store */
for(int i=0; i < iterations; i++) {
int index = threadIdx.x + i * BLOCK_SIZE;
if(index < size) {
if(blockSum == 0) {
v[blockIndex + index] = 1.0 / size;
} else
v[blockIndex + index] = v[blockIndex + index] / blockSum;
}
}
}
extern "C"
__global__ void SoftMax_32(float* v, long size) {
SoftMax<32>(v, size);
}
extern "C"
__global__ void SoftMax_64(float* v, long size) {
SoftMax<64>(v, size);
}
extern "C"
__global__ void SoftMax_128(float* v, long size) {
SoftMax<128>(v, size);
}
extern "C"
__global__ void SoftMax_256(float* v, long size) {
SoftMax<256>(v, size);
}
extern "C"
__global__ void SoftMax_512(float* v, long size) {
SoftMax<512>(v, size);
}
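/*
 * Host-side usage sketch (an assumption, not part of this file): for a
 * row-major [rows x cols] matrix d_m already on the device, launch one
 * block per row with the wrapper matching the chosen block size, e.g.
 *
 *   SoftMax_256<<<rows, 256>>>(d_m, (long)cols);
 *
 * SoftMax_1/SoftMax_2 below are an alternative two-kernel path for one long
 * vector: SoftMax_1 exponentiates in place and emits a partial sum per
 * block, SoftMax_2 reduces those partial sums and divides. It relies on
 * safe_exp() clamping instead of subtracting the maximum first.
 */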
extern "C"
__global__ void SoftMax_1(float* v, long size, float* sums) {
__shared__ float cache[THREADS_PER_BLOCK];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row >= size)
return;
cache[threadIdx.x] = safe_exp(v[row]);
v[row] = cache[threadIdx.x];
__syncthreads();
sumBlock(cache, min((long)blockDim.x, size - blockDim.x * blockIdx.x));
/* each block creates a sum */
if(threadIdx.x == 0)
sums[blockIdx.x] = cache[0];
}
extern "C"
__global__ void SoftMax_2(float* v, long size, float* sums, long sums_size) {
__shared__ float cache[THREADS_PER_BLOCK];
int loops = calcIterations(blockDim.x, sums_size);
cache[threadIdx.x] = 0;
/*
* Calculate total sum from "sums" array
* should loop only once unless size > THREADS_PER_BLOCK^2
*/
for(int i=0; i < loops; i++) {
int index = i * blockDim.x + threadIdx.x;
if(index < sums_size)
cache[threadIdx.x] += sums[index];
}
__syncthreads();
sumBlock(cache, min((long)blockDim.x, sums_size));
__syncthreads();
double sum = cache[0];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < size)
if(sum == 0) {
v[row] = 1.0 / size;
} else {
v[row] /= sum;
}
}
extern "C"
__global__ void empty_0() {
}
extern "C"
__global__ void empty_1(int i) {
}
|
1dd8d091f009b2a378c5c29bd832406b10fd466d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
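/*
 * Every kernel below follows the same pattern: __launch_bounds__ pins the
 * block size and minimum resident blocks per SM to the values exported by
 * the kernel traits, and the body is compiled only when
 * 800 <= __CUDA_ARCH__ < 1000. On any other architecture the device code
 * reduces to the printf, so a mismatched build fails loudly at run time
 * instead of silently doing nothing.
 */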
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 1dd8d091f009b2a378c5c29bd832406b10fd466d.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
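/*
 * Every kernel below follows the same pattern: __launch_bounds__ pins the
 * block size and minimum resident blocks per SM to the values exported by
 * the kernel traits, and the body is compiled only when
 * 800 <= __CUDA_ARCH__ < 1000. On any other architecture the device code
 * reduces to the printf, so a mismatched build fails loudly at run time
 * instead of silently doing nothing.
 */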
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
9da488794f8fe410e3732e7170f78baf67316922.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psc_cuda.h"
#include "particles_cuda.h"
#include <mrc_ddc_private.h>
#include <mrc_profile.h>
void
psc_cuda_init(void)
{
static bool first_time = true;
if (!first_time)
return;
first_time = false;
int deviceCount;
hipGetDeviceCount(&deviceCount);
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0) {
printf("There is no device supporting CUDA\n");
return;
}
for (int dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
// This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" CUDA Capability Major revision number: %d\n", deviceProp.major);
printf(" CUDA Capability Minor revision number: %d\n", deviceProp.minor);
printf(" Total amount of global memory: %lu bytes\n", deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No");
#endif
#if CUDART_VERSION >= 2020
printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated: %s\n", deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Compute mode: %s\n", deviceProp.computeMode == hipComputeModeDefault ?
"Default (multiple host threads can use this device simultaneously)" :
deviceProp.computeMode == hipComputeModeExclusive ?
"Exclusive (only one host thread at a time can use this device)" :
deviceProp.computeMode == hipComputeModeProhibited ?
"Prohibited (no host thread can use this device)" :
"Unknown");
#endif
}
}
// FIXME, hardcoding is bad, needs to be consistent, etc...
#define MAX_BND_COMPONENTS (3)
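/*
 * Particle state for all patches lives in two contiguous device arrays
 * (d_xi4 for positions, d_pxi4 for momenta); the copy helpers below locate
 * a patch's slice by summing the particle counts of all preceding patches.
 */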
EXTERN_C void
__particles_cuda_to_device(struct psc_particles *prts, float4 *xi4, float4 *pxi4)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
struct psc_mparticles *mprts = cuda->mprts;
assert(mprts);
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int off = 0;
for (int p = 0; p < prts->p; p++) {
off += psc_mparticles_get_patch(mprts, p)->n_part;
}
int n_part = prts->n_part;
check(hipMemcpy(mprts_cuda->d_xi4 + off, xi4, n_part * sizeof(*xi4),
hipMemcpyHostToDevice));
check(hipMemcpy(mprts_cuda->d_pxi4 + off, pxi4, n_part * sizeof(*pxi4),
hipMemcpyHostToDevice));
}
EXTERN_C void
__particles_cuda_from_device(struct psc_particles *prts, float4 *xi4, float4 *pxi4)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
struct psc_mparticles *mprts = cuda->mprts;
assert(mprts);
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int off = 0;
for (int p = 0; p < prts->p; p++) {
off += psc_mparticles_get_patch(mprts, p)->n_part;
}
int n_part = prts->n_part;
check(hipMemcpy(xi4, mprts_cuda->d_xi4 + off, n_part * sizeof(*xi4),
hipMemcpyDeviceToHost));
check(hipMemcpy(pxi4, mprts_cuda->d_pxi4 + off, n_part * sizeof(*pxi4),
hipMemcpyDeviceToHost));
}
EXTERN_C void
cuda_copy_bidx_from_dev(struct psc_particles *prts, unsigned int *h_bidx, unsigned int *d_bidx)
{
check(hipMemcpy(h_bidx, d_bidx, prts->n_part * sizeof(*h_bidx),
hipMemcpyDeviceToHost));
}
EXTERN_C void
cuda_copy_bidx_to_dev(struct psc_particles *prts, unsigned int *d_bidx, unsigned int *h_bidx)
{
check(hipMemcpy(d_bidx, h_bidx, prts->n_part * sizeof(*d_bidx),
hipMemcpyHostToDevice));
}
void
__psc_mparticles_cuda_setup(struct psc_mparticles *mprts)
{
psc_cuda_init();
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
if (mprts->nr_patches == 0) {
return;
}
// FIXME we assume that every patch will have those same dims
int *ldims = ppsc->patch[0].ldims;
if (!mprts->flags) {
// FIXME, they get set too late, so auto-dispatch "1vb" doesn't work
mprts->flags = MP_NEED_BLOCK_OFFSETS | MP_BLOCKSIZE_4X4X4 | MP_NO_CHECKERBOARD;
}
int bs[3];
for (int d = 0; d < 3; d++) {
switch (mprts->flags & MP_BLOCKSIZE_MASK) {
case MP_BLOCKSIZE_1X1X1: bs[d] = 1; break;
case MP_BLOCKSIZE_2X2X2: bs[d] = 2; break;
case MP_BLOCKSIZE_4X4X4: bs[d] = 4; break;
case MP_BLOCKSIZE_8X8X8: bs[d] = 8; break;
default: assert(0);
}
if (ppsc->domain.gdims[d] == 1) {
bs[d] = 1;
}
mprts_cuda->blocksize[d] = bs[d];
assert(ldims[d] % bs[d] == 0); // FIXME not sure what breaks if not
mprts_cuda->b_mx[d] = (ldims[d] + bs[d] - 1) / bs[d];
// assumes no AMR
mprts_cuda->b_dxi[d] = 1.f / (mprts_cuda->blocksize[d] * ppsc->patch[0].dx[d]);
}
mprts_cuda->nr_blocks = mprts_cuda->b_mx[0] * mprts_cuda->b_mx[1] * mprts_cuda->b_mx[2];
mprts_cuda->nr_total_blocks = mprts->nr_patches * mprts_cuda->nr_blocks;
mprts_cuda->h_dev = new particles_cuda_dev_t[mprts->nr_patches];
check(hipMalloc(&mprts_cuda->d_dev,
mprts->nr_patches * sizeof(*mprts_cuda->d_dev)));
mprts_cuda->nr_prts = 0;
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *prts_cuda = psc_particles_cuda(prts);
mprts_cuda->nr_prts += prts->n_part;
prts_cuda->mprts = mprts;
}
mprts_cuda->h_bnd_cnt = new unsigned int[mprts_cuda->nr_total_blocks];
unsigned int nr_alloced = mprts_cuda->nr_prts * 1.2;
mprts_cuda->nr_alloced = nr_alloced;
check(hipMalloc((void **) &mprts_cuda->d_xi4, nr_alloced * sizeof(float4)));
check(hipMalloc((void **) &mprts_cuda->d_pxi4, nr_alloced * sizeof(float4)));
check(hipMalloc((void **) &mprts_cuda->d_alt_xi4, nr_alloced * sizeof(float4)));
check(hipMalloc((void **) &mprts_cuda->d_alt_pxi4, nr_alloced * sizeof(float4)));
check(hipMalloc((void **) &mprts_cuda->d_bidx, nr_alloced * sizeof(unsigned int)));
check(hipMalloc((void **) &mprts_cuda->d_alt_bidx, nr_alloced * sizeof(unsigned int)));
check(hipMalloc((void **) &mprts_cuda->d_ids, nr_alloced * sizeof(unsigned int)));
check(hipMalloc((void **) &mprts_cuda->d_sums, nr_alloced * sizeof(unsigned int)));
check(hipMalloc((void **) &mprts_cuda->d_off,
(mprts_cuda->nr_total_blocks + 1) * sizeof(*mprts_cuda->d_off)));
check(hipMalloc((void **) &mprts_cuda->d_bnd_spine_cnts,
(1 + mprts_cuda->nr_total_blocks * (CUDA_BND_STRIDE + 1)) * sizeof(unsigned int)));
check(hipMalloc((void **) &mprts_cuda->d_bnd_spine_sums,
(1 + mprts_cuda->nr_total_blocks * (CUDA_BND_STRIDE + 1)) * sizeof(unsigned int)));
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *prts_cuda = psc_particles_cuda(prts);
prts_cuda->h_dev = &mprts_cuda->h_dev[p];
prts_cuda->d_dev = &mprts_cuda->d_dev[p];
}
}
void
__psc_mparticles_cuda_free(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
delete[] mprts_cuda->h_dev;
delete[] mprts_cuda->h_bnd_cnt;
check(hipFree(mprts_cuda->d_xi4));
check(hipFree(mprts_cuda->d_pxi4));
check(hipFree(mprts_cuda->d_alt_xi4));
check(hipFree(mprts_cuda->d_alt_pxi4));
check(hipFree(mprts_cuda->d_bidx));
check(hipFree(mprts_cuda->d_alt_bidx));
check(hipFree(mprts_cuda->d_ids));
check(hipFree(mprts_cuda->d_sums));
check(hipFree(mprts_cuda->d_off));
check(hipFree(mprts_cuda->d_bnd_spine_cnts));
check(hipFree(mprts_cuda->d_bnd_spine_sums));
check(hipFree(mprts_cuda->d_dev));
}
// ======================================================================
// ======================================================================
// fields
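/*
 * All patches share a single contiguous device allocation of
 * nr_patches * nr_fields * im[0]*im[1]*im[2] floats (d_flds), plus staging
 * buffers sized for just the ghost frame of each yz plane; a reduced
 * per-patch "bnd" copy is kept on the host for packing and unpacking
 * boundary data.
 */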
void
__psc_mfields_cuda_setup(struct psc_mfields *mflds)
{
assert(!ppsc->domain.use_pml);
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
unsigned int total_size = 0;
unsigned int buf_size = 0;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
if (p == 0) {
for (int d = 0; d < 3; d++) {
mflds_cuda->im[d] = flds->im[d];
mflds_cuda->ib[d] = flds->ib[d];
}
} else {
for (int d = 0; d < 3; d++) {
assert(mflds_cuda->im[d] == flds->im[d]);
assert(mflds_cuda->ib[d] == flds->ib[d]);
}
}
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
total_size += size;
if (flds->im[0] == 1) {;// + 2*BND) {
int B = 2*BND;
buf_size = 2*B * (flds->im[1] + flds->im[2] - 2*B);
} else {
assert(0);
}
}
mprintf("nr_fields %d tsize %d\n", mflds->nr_fields, total_size);
check(hipMalloc((void **) &mflds_cuda->d_flds,
mflds->nr_fields * total_size * sizeof(float)));
check(hipMalloc((void **) &mflds_cuda->d_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches * sizeof(float)));
mflds_cuda->h_bnd_buf = new float[MAX_BND_COMPONENTS * mflds->nr_patches * buf_size];
float *d_flds = mflds_cuda->d_flds;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
assert(psc_fields_ops(flds) == &psc_fields_cuda_ops);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
flds_cuda->d_flds = d_flds;
assert(d_flds == mflds_cuda->d_flds + p * flds->nr_comp * size);
d_flds += flds->nr_comp * size;
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
int sz = 1;
for (int d = 0; d < 3; d++) {
if (flds->im[d] == 1 - 2 * flds->ib[d]) { // only 1 non-ghost point
cf->im[d] = 1;
cf->ib[d] = 0;
} else {
cf->im[d] = flds->im[d];
cf->ib[d] = flds->ib[d];
}
sz *= cf->im[d];
}
cf->arr = new float [MAX_BND_COMPONENTS * sz];
cf->arr_off = cf->arr
- ((cf->ib[2] * cf->im[1] + cf->ib[1]) * cf->im[0] + cf->ib[0]);
}
}
void
__psc_mfields_cuda_destroy(struct psc_mfields *mflds)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
check(hipFree(mflds_cuda->d_flds));
check(hipFree(mflds_cuda->d_bnd_buf));
check(hipFree(mflds_cuda->d_nei_patch));
check(hipFree(mflds_cuda->d_map_out));
check(hipFree(mflds_cuda->d_map_in));
delete[] mflds_cuda->h_bnd_buf;
delete[] mflds_cuda->h_nei_patch;
delete[] mflds_cuda->h_map_out;
delete[] mflds_cuda->h_map_in;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
delete[] cf->arr;
}
}
EXTERN_C void
__fields_cuda_to_device(struct psc_fields *pf, real *h_flds, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
unsigned int size = pf->im[0] * pf->im[1] * pf->im[2];
check(hipMemcpy(pfc->d_flds + mb * size,
h_flds + mb * size,
(me - mb) * size * sizeof(float),
hipMemcpyHostToDevice));
}
EXTERN_C void
__fields_cuda_from_device(struct psc_fields *pf, real *h_flds, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
unsigned int size = pf->im[0] * pf->im[1] * pf->im[2];
check(hipMemcpy(h_flds + mb * size,
pfc->d_flds + mb * size,
(me - mb) * size * sizeof(float),
hipMemcpyDeviceToHost));
}
// ======================================================================
enum {
PACK,
UNPACK,
};
// ======================================================================
// fields_device_pack
// FIXME/OPT: can probably be accelerated by making component the fast index
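/*
 * The pack/unpack kernels linearize the ghost frame of each yz plane: a
 * frame of width B around a gmy x gmz patch holds 2*B*(gmy + gmz - 2*B)
 * cells, and each thread decodes its linear id into (patch, component, jy,
 * jz), covering the bottom B rows first, then the left/right B-wide strips,
 * then the top B rows.
 */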
template<int B, int WHAT, int NR_COMPONENTS>
__global__ static void
k_fields_device_pack_yz(real *d_buf, real *d_flds, int gmy, int gmz,
int nr_patches, int nr_fields)
{
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
int gmx = 1;//2*BND + 1;
int jx = 0;//BND;
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int n_threads = NR_COMPONENTS * buf_size;
int p = tid / n_threads;
if (p >= nr_patches)
return;
int n = tid - p * n_threads;
int m = n / buf_size; n -= m * buf_size;
int jz, jy;
if (n < B * gmy) {
jz = n / gmy; n -= jz * gmy;
jy = n;
} else if (n < B * gmy + (gmz - 2*B) * 2*B) {
n -= B * gmy;
jz = n / (2*B); n -= jz * 2*B;
if (n < B) {
jy = n;
} else {
jy = n + gmy - 2*B;
}
jz += B;
} else {
n -= B * gmy + (gmz - 2*B) * 2*B;
jz = n / gmy; n -= jz * gmy;
jy = n;
jz += gmz - B;
}
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[tid] = d_flds[(((p * nr_fields + m) * gmz + jz) * gmy + jy) * gmx + jx];
} else if (WHAT == UNPACK) {
d_flds[(((p * nr_fields + m) * gmz + jz) * gmy + jy) * gmx + jx] = d_buf[tid];
}
}
template<int B, int WHAT, int NR_COMPONENTS>
__global__ static void
k_fields_device_pack2_yz(real *d_buf, real *d_flds, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_fields)
{
unsigned int nr_ghosts = 2*B * (gmy + gmz - 2*B);
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int p = tid / nr_ghosts;
if (p >= nr_patches)
return;
int n = tid - p * nr_ghosts;
int jy, jz;
int diry, dirz;
if (n < 2*B * gmy) {
jz = n / gmy;
jy = n - jz * gmy;
if (jy < B) {
diry = -1;
} else if (jy < gmy - B) {
diry = 0;
} else {
diry = 1;
}
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += gmz - 2*B;
}
} else {
n -= 2*B * gmy;
jz = n / (2*B) + B;
jy = n % (2*B);
dirz = 0;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += gmy - 2*B;
}
}
int s_p = d_nei_patch_by_dir1[p*9 + 3*dirz + diry + 4];
// copy only ghost areas that interface with remote patches
if (1||s_p < 0) {
for (int m = 0; m < NR_COMPONENTS; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_ghosts * nr_patches + tid] = d_flds[((p * nr_fields + m) * gmz + jz) * gmy + jy];
} else if (WHAT == UNPACK) {
d_flds[((p * nr_fields + m) * gmz + jz) * gmy + jy] = d_buf[m * nr_ghosts * nr_patches + tid];
}
}
}
}
template<int B, int NR_COMPONENTS>
__global__ static void
k_fill_ghosts_local_yz(float *d_flds, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_fields, int nr_patches)
{
int nr_ghosts = 2*B * (gmy + gmz - 2*B);
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int r_p = tid / nr_ghosts;
if (r_p >= nr_patches)
return;
int n = tid - r_p * nr_ghosts;
int jy, jz;
int diry, dirz;
if (n < 2*B * gmy) {
jz = n / gmy;
jy = n - jz * gmy;
if (jy < B) {
diry = -1;
} else if (jy < gmy - B) {
diry = 0;
} else {
diry = 1;
}
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += gmz - 2*B;
}
} else {
n -= 2*B * gmy;
jz = n / (2*B) + B;
jy = n % (2*B);
dirz = 0;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += gmy - 2*B;
}
}
int s_p = d_nei_patch_by_dir1[r_p*9 + 3*dirz + diry + 4];
if (s_p >= 0) {
float *r_f = &d_flds[((r_p * nr_fields) * gmz) * gmy];
float *s_f = &d_flds[((s_p * nr_fields)
* gmz - dirz * (gmz - 2*2))
* gmy - diry * (gmy - 2*2)];
for (int m = 0; m < NR_COMPONENTS; m++) {
int i = (m * gmz + jz) * gmy + jy;
r_f[i] = s_f[i];
}
}
}
template<int B, bool pack>
static void
fields_device_pack_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
unsigned int size = mflds_cuda->im[0] * mflds_cuda->im[1] * mflds_cuda->im[2];
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
int n_threads = buf_size * (me - mb) * mflds->nr_patches;
dim3 dimGrid((n_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
float *d_bnd_buf = mflds_cuda->d_bnd_buf;
float *d_flds = mflds_cuda->d_flds + mb * size;
hipLaunchKernelGGL(( k_fields_device_pack_yz<B, pack, NR_COMPONENTS>) , dim3(dimGrid), dim3(dimBlock), 0, 0,
d_bnd_buf, d_flds, gmy, gmz, mflds->nr_patches,
mflds->nr_fields);
cuda_sync_if_enabled();
}
template<int B, bool pack>
static void
fields_device_pack2_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_fields = mflds->nr_fields;
int nr_patches = mflds->nr_patches;
int nr_ghosts = 2*B * (im[1] + im[2] - 2*B);
int nr_threads = nr_ghosts * nr_patches;
dim3 dimGrid((nr_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
hipLaunchKernelGGL(( k_fields_device_pack2_yz<B, pack, NR_COMPONENTS>) , dim3(dimGrid), dim3(dimBlock), 0, 0,
mflds_cuda->d_bnd_buf, d_flds, mflds_cuda->d_nei_patch,
im[1], im[2], nr_patches, nr_fields);
cuda_sync_if_enabled();
}
EXTERN_C void
__fields_cuda_fill_ghosts_local(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int B = 2;
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_fields = mflds->nr_fields;
int nr_patches = mflds->nr_patches;
int nr_ghosts = 2*B * (im[1] + im[2] - 2*B);
int nr_threads = nr_ghosts * nr_patches;
dim3 dimGrid((nr_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
hipLaunchKernelGGL(( k_fill_ghosts_local_yz<B, NR_COMPONENTS>) , dim3(dimGrid), dim3(dimBlock), 0, 0,
d_flds, mflds_cuda->d_nei_patch, im[1], im[2],
nr_fields, nr_patches);
cuda_sync_if_enabled();
#if 0
thrust::device_ptr<float> d_flds(mflds_cuda->d_flds);
thrust::host_vector<float> h_flds(d_flds, d_flds + nr_patches * nr_fields * im[2] *im[2]);
for (int tid = 0; tid < nr_threads; tid++) {
cuda_fill_ghosts_local_gold(&h_flds[0], nei_patch_by_dir1, mb, me, im, nr_fields,
nr_patches, nr_ghosts, tid);
}
thrust::copy(h_flds.begin(), h_flds.end(), d_flds);
#endif
}
// ======================================================================
// fields_host_pack
#define WHAT do { \
if (what == PACK) { \
h_buf[tid++] = F3_CF_0(cf, m, 0,jy,jz); \
} else if (what == UNPACK) { \
F3_CF_0(cf, m, 0,jy,jz) = h_buf[tid++]; \
} \
} while(0)
template<int B, int what>
static void
fields_host_pack_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
real *h_buf = mflds_cuda->h_bnd_buf + p * buf_size * MAX_BND_COMPONENTS;
int gmy = cf->im[1], gmz = cf->im[2];
int tid = 0;
for (int m = 0; m < me - mb; m++) {
for (int jz = 0; jz < B; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = B; jz < gmz - B; jz++) {
for (int jy = 0; jy < B; jy++) {
WHAT;
}
for (int jy = gmy - B; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = gmz - B; jz < gmz; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
}
}
}
template<int B, int what>
static void
fields_host_pack2_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
real *h_buf = mflds_cuda->h_bnd_buf;
int tid = 0;
for (int m = 0; m < me - mb; m++) {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
int gmy = cf->im[1], gmz = cf->im[2];
for (int jz = 0; jz < B; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = gmz - B; jz < gmz; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = B; jz < gmz - B; jz++) {
for (int jy = 0; jy < B; jy++) {
WHAT;
}
for (int jy = gmy - B; jy < gmy; jy++) {
WHAT;
}
}
}
}
}
#undef WHAT
template<int B, int what>
static void
fields_host_pack3_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int *im = mflds_cuda->im;
int nr_fields = mflds->nr_fields;
real *h_buf = mflds_cuda->h_bnd_buf;
int nr_map;
int *h_map;
if (B == 2) {
h_map = mflds_cuda->h_map_out;
nr_map = mflds_cuda->nr_map_out;
} else if (B == 4) {
h_map = mflds_cuda->h_map_in;
nr_map = mflds_cuda->nr_map_in;
} else {
assert(0);
}
for (int tid = 0; tid < nr_map; tid++) {
int i = h_map[tid];
int p = i / (nr_fields * im[2] * im[1]);
int off = i - p * (nr_fields * im[2] * im[1]);
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
for (int m = 0; m < me - mb; m++) {
if (what == PACK) {
h_buf[tid + m * nr_map] = cf->arr[off + m * im[2] * im[1]];
} else if (what == UNPACK) {
cf->arr[off + m * im[2] * im[1]] = h_buf[tid + m * nr_map];
}
}
}
}
template<int B>
static void
__fields_cuda_from_device_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_device_pack", 1., 0, 0);
pr2 = prof_register("cuda_memcpy", 1., 0, 0);
pr3 = prof_register("field_host_unpack", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
prof_start(pr1);
fields_device_pack_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(hipMemcpy(mflds_cuda->h_bnd_buf, mflds_cuda->d_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches *
sizeof(*mflds_cuda->h_bnd_buf),
hipMemcpyDeviceToHost));
prof_stop(pr2);
prof_start(pr3);
fields_host_pack_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B>
static void
__fields_cuda_to_device_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_host_pack", 1., 0, 0);
pr2 = prof_register("cuda_memcpy", 1., 0, 0);
pr3 = prof_register("field_device_unpack", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
prof_start(pr1);
fields_host_pack_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(hipMemcpy(mflds_cuda->d_bnd_buf, mflds_cuda->h_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches *
sizeof(*mflds_cuda->d_bnd_buf),
hipMemcpyHostToDevice));
prof_stop(pr2);
prof_start(pr3);
fields_device_pack_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B, bool pack>
static void fields_device_pack3_yz(struct psc_mfields *mflds, int mb, int me);
template<int B>
static void
__fields_cuda_from_device3_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_device_pack 3i", 1., 0, 0);
pr2 = prof_register("cuda_memcpy 3i", 1., 0, 0);
pr3 = prof_register("field_host_unpack 3i", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
int nr_map;
if (B == 4) {
nr_map = mflds_cuda->nr_map_in;
} else {
assert(0);
}
prof_start(pr1);
fields_device_pack3_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
assert(B == 4);
check(hipMemcpy(mflds_cuda->h_bnd_buf, mflds_cuda->d_bnd_buf,
(me - mb) * nr_map * sizeof(*mflds_cuda->h_bnd_buf),
hipMemcpyDeviceToHost));
prof_stop(pr2);
prof_start(pr3);
fields_host_pack3_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B>
static void
__fields_cuda_to_device3_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_host_pack 3o", 1., 0, 0);
pr2 = prof_register("cuda_memcpy 3o", 1., 0, 0);
pr3 = prof_register("field_device_unpack 3o", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
int nr_map;
if (B == 2) {
nr_map = mflds_cuda->nr_map_out;
} else {
assert(0);
}
prof_start(pr1);
fields_host_pack3_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(hipMemcpy(mflds_cuda->d_bnd_buf, mflds_cuda->h_bnd_buf,
(me - mb) * nr_map * sizeof(*mflds_cuda->d_bnd_buf),
hipMemcpyHostToDevice));
prof_stop(pr2);
prof_start(pr3);
fields_device_pack3_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
// ======================================================================
EXTERN_C void
__fields_cuda_from_device_inside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_from_device_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(hipMemcpy(flds_cuda->bnd.arr,
flds_cuda->d_flds + mb * size,
(me - mb) * size * sizeof(float),
hipMemcpyDeviceToHost));
}
}
}
EXTERN_C void
__fields_cuda_from_device_inside_only(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_from_device3_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(hipMemcpy(flds_cuda->bnd.arr,
flds_cuda->d_flds + mb * size,
(me - mb) * size * sizeof(float),
hipMemcpyDeviceToHost));
}
}
}
EXTERN_C void
__fields_cuda_to_device_outside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_to_device3_yz<BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(hipMemcpy(flds_cuda->d_flds + mb * size,
flds_cuda->bnd.arr,
(me - mb) * size * sizeof(float),
hipMemcpyHostToDevice));
}
}
}
EXTERN_C void
__fields_cuda_to_device_inside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_to_device_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(hipMemcpy(flds_cuda->d_flds + mb * size,
flds_cuda->bnd.arr,
(me - mb) * size * sizeof(float),
hipMemcpyHostToDevice));
}
}
}
void
cuda_fill_ghosts_local_gold(float *h_flds, int *nei_patch_by_dir1, int mb, int me,
int *im, int nr_fields, int nr_patches, int nr_ghosts, int threadIdx)
{
int iy, iz;
int diry, dirz;
int r_p = threadIdx / nr_ghosts;
if (r_p >= nr_patches)
return;
int tid = threadIdx - nr_ghosts * r_p;
if (tid < 4 * im[1]) {
iy = tid % im[1];
iz = tid / im[1];
if (iy < 2) {
diry = -1;
} else if (iy < im[1] - 2) {
diry = 0;
} else {
diry = 1;
}
if (iz < 2) {
dirz = -1;
} else {
dirz = 1;
iz += im[2] - 2*2;
}
} else {
int tid2 = tid - 4 * im[1];
iy = tid2 % 4;
iz = tid2 / 4 + 2;
dirz = 0;
if (iy < 2) {
diry = -1;
} else {
diry = 1;
iy += im[1] - 2*2;
}
}
int s_p = nei_patch_by_dir1[r_p*9 + 3*dirz + diry + 4];
if (s_p >= 0) {
float *r_f = &h_flds[((r_p * nr_fields + mb) * im[2]) * im[1]];
float *s_f = &h_flds[((s_p * nr_fields + mb)
* im[2] - dirz * (im[2] - 2*2))
* im[1] - diry * (im[1] - 2*2)];
for (int m = 0; m < me - mb; m++) {
int i = (m * im[2] + iz) * im[1] + iy;
r_f[i] = s_f[i];
}
}
}
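/*
 * The two index maps built by the functions declared below drive the
 * map-based routines fields_host_pack3_yz and k_fields_device_pack3_yz:
 * map_out lists ghost-frame cells whose neighbouring patch is not on this
 * rank and is used when scattering received boundary values back into
 * d_flds, while map_in lists the cells just inside the patch boundary that
 * touch such remote neighbours and is used when gathering the locally
 * computed boundary layer off the device. Ghost cells whose neighbour patch
 * is local are filled directly on the GPU by k_fill_ghosts_local_yz.
 */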
static void fields_create_map_out_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map);
static void fields_create_map_in_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map);
EXTERN_C void
__fields_cuda_fill_ghosts_setup(struct psc_mfields *mflds, struct mrc_ddc *ddc)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_patches = mflds->nr_patches;
if (!mflds_cuda->h_nei_patch) {
struct mrc_ddc_multi *multi = to_mrc_ddc_multi(ddc);
struct mrc_ddc_pattern2 *patt2 = &multi->fill_ghosts2;
struct mrc_ddc_rank_info *ri = patt2->ri;
mflds_cuda->h_nei_patch = new int[9 * nr_patches];
for (int p = 0; p < nr_patches; p++) {
for (int dir1 = 0; dir1 < 9; dir1++) {
mflds_cuda->h_nei_patch[p * 9 + dir1] = -1;
}
}
for (int i = 0; i < ri[multi->mpi_rank].n_recv_entries; i++) {
struct mrc_ddc_sendrecv_entry *re = &ri[multi->mpi_rank].recv_entry[i];
mflds_cuda->h_nei_patch[re->patch * 9 + re->dir1 / 3] = re->nei_patch;
}
check(hipMalloc((void **) &mflds_cuda->d_nei_patch,
9 * nr_patches * sizeof(*mflds_cuda->d_nei_patch)));
check(hipMemcpy(mflds_cuda->d_nei_patch, mflds_cuda->h_nei_patch,
9 * nr_patches * sizeof(*mflds_cuda->d_nei_patch),
hipMemcpyHostToDevice));
fields_create_map_out_yz(mflds, 2, &mflds_cuda->nr_map_out, &mflds_cuda->h_map_out);
check(hipMalloc((void **) &mflds_cuda->d_map_out,
mflds_cuda->nr_map_out * sizeof(*mflds_cuda->d_map_out)));
check(hipMemcpy(mflds_cuda->d_map_out, mflds_cuda->h_map_out,
mflds_cuda->nr_map_out * sizeof(*mflds_cuda->d_map_out),
hipMemcpyHostToDevice));
fields_create_map_in_yz(mflds, 2, &mflds_cuda->nr_map_in, &mflds_cuda->h_map_in);
mprintf("map_out %d\n", mflds_cuda->nr_map_out);
mprintf("map_in %d\n", mflds_cuda->nr_map_in);
check(hipMalloc((void **) &mflds_cuda->d_map_in,
mflds_cuda->nr_map_in * sizeof(*mflds_cuda->d_map_in)));
check(hipMemcpy(mflds_cuda->d_map_in, mflds_cuda->h_map_in,
mflds_cuda->nr_map_in * sizeof(*mflds_cuda->d_map_in),
hipMemcpyHostToDevice));
}
}
template<int WHAT>
static void
g_fields_device_pack3_yz(int tid, real *d_buf, real *d_flds, int *d_map, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_components, int nr_map)
{
if (tid >= nr_map)
return;
// copy only ghost areas that interface with remote patches
int i = d_map[tid];
for (int m = 0; m < nr_components; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_map + tid] = d_flds[i + m * gmz * gmy];
} else if (WHAT == UNPACK) {
d_flds[i + m * gmz * gmy] = d_buf[m * nr_map + tid];
}
}
}
template<int WHAT>
__global__ static void
k_fields_device_pack3_yz(real *d_buf, real *d_flds, int *d_map, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_components, int nr_map)
{
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if (tid >= nr_map)
return;
// copy only ghost areas that interface with remote patches
int i = d_map[tid];
for (int m = 0; m < nr_components; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_map + tid] = d_flds[i + m * gmz * gmy];
} else if (WHAT == UNPACK) {
d_flds[i + m * gmz * gmy] = d_buf[m * nr_map + tid];
}
}
}
#undef check
#undef _GLIBCXX_USE_INT128
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
static void
fields_create_map_out_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
bool remote_only = true;
int *im = mflds_cuda->im;
int nr_patches = mflds->nr_patches;
int nr_fields = mflds->nr_fields;
int nr_map = 0;
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
if (dir[1] == 0 && dir[2] == 0) {
continue;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dir[2] + dir[1] + 4];
if (!remote_only || s_p < 0) {
nr_map += ((dir[1] == 0 ? im[1] - 2*B : B) *
(dir[2] == 0 ? im[2] - 2*B : B));
}
}
}
}
*p_nr_map = nr_map;
*p_h_map = new int[nr_map];
int tid = 0;
for (int p = 0; p < nr_patches; p++) {
for (int jjz = 0; jjz < 2*B; jjz++) {
int jz = jjz;
int dirz;
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += im[2] - 2*B;
}
for (int jy = 0; jy < im[1]; jy++) {
int diry;
if (jy < B) {
diry = -1;
} else if (jy < im[1] - B) {
diry = 0;
} else {
diry = 1;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dirz + diry + 4];
if (!remote_only || s_p < 0) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + jz) * im[1] + jy;
}
}
}
for (int jz = B; jz < im[2] - B; jz++) {
int dirz = 0;
for (int jjy = 0; jjy < 2*B; jjy++) {
int jy = jjy;
int diry;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += im[1] - 2*B;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dirz + diry + 4];
if (!remote_only || s_p < 0) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + jz) * im[1] + jy;
}
}
}
}
assert(tid == nr_map);
}
static void
fields_create_map_in_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
bool remote_only = true;
int *im = mflds_cuda->im;
int ldims[3] = { 1, im[1] - 2*2, im[2] - 2*2 };
int nr_patches = mflds->nr_patches;
int nr_fields = mflds->nr_fields;
bool *has_nei = new bool[9 * nr_patches];
// FIXME, we don't need the ghosts here...
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
int dir1 = p*9 + 3*dir[2] + dir[1] + 4;
has_nei[dir1] = false;
int p_mm = mflds_cuda->h_nei_patch[p*9 + -1 + 3*-1 + 4];
int p_0m = mflds_cuda->h_nei_patch[p*9 + 0 + 3*-1 + 4];
int p_pm = mflds_cuda->h_nei_patch[p*9 + +1 + 3*-1 + 4];
int p_m0 = mflds_cuda->h_nei_patch[p*9 + -1 + 3* 0 + 4];
int p_p0 = mflds_cuda->h_nei_patch[p*9 + +1 + 3* 0 + 4];
int p_mp = mflds_cuda->h_nei_patch[p*9 + +1 + 3*+1 + 4];
int p_0p = mflds_cuda->h_nei_patch[p*9 + 0 + 3*+1 + 4];
int p_pp = mflds_cuda->h_nei_patch[p*9 + -1 + 3*+1 + 4];
if (dir[1] == 0 && dir[2] == 0) {
} else if (dir[1] == -1 && dir[2] == -1) {
if (p_mm < 0 || p_m0 < 0 || p_0m < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == 0 && dir[2] == -1) {
if (p_0m < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == -1) {
if (p_pm < 0 || p_0m < 0 || p_p0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == -1 && dir[2] == 0) {
if (p_m0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == 0) {
if (p_p0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == -1 && dir[2] == +1) {
if (p_mp < 0 || p_m0 < 0 || p_0p < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == 0 && dir[2] == +1) {
if (p_0p < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == +1) {
if (p_pp < 0 || p_0p < 0 || p_p0 < 0) {
has_nei[dir1] = true;
}
}
}
}
}
int nr_map = 0;
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
if (dir[1] == 0 && dir[2] == 0) {
continue;
}
int dir1 = p*9 + 3*dir[2] + dir[1] + 4;
if (!remote_only || has_nei[dir1]) {
nr_map += ((dir[1] == 0 ? ldims[1] - 2*B : B) *
(dir[2] == 0 ? ldims[2] - 2*B : B));
}
}
}
}
*p_nr_map = nr_map;
*p_h_map = new int[nr_map];
int tid = 0;
for (int p = 0; p < nr_patches; p++) {
for (int jjz = 0; jjz < 2*B; jjz++) {
int jz = jjz;
int dirz;
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += ldims[2] - 2*B;
}
for (int jy = 0; jy < ldims[1]; jy++) {
int diry;
if (jy < B) {
diry = -1;
} else if (jy < ldims[1] - B) {
diry = 0;
} else {
diry = 1;
}
int dir1 = p*9 + 3*dirz + diry + 4;
if (!remote_only || has_nei[dir1]) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + (jz + B)) * im[1] + (jy + B);
}
}
}
for (int jz = B; jz < ldims[2] - B; jz++) {
int dirz = 0;
for (int jjy = 0; jjy < 2*B; jjy++) {
int jy = jjy;
int diry;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += ldims[1] - 2*B;
}
int dir1 = p*9 + 3*dirz + diry + 4;
if (!remote_only || has_nei[dir1]) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + (jz + B)) * im[1] + (jy + B);
}
}
}
}
assert(tid == nr_map);
delete[] has_nei;
}
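// fields_device_pack3_yz
//
// Packs or unpacks (template parameter 'pack' = PACK / UNPACK) the mapped
// boundary cells for the 3 components starting at mb, using the cell map
// selected by B: the "out" map for B == 2, the "in" map for B == 4. The
// disabled #else branch is a host-side debug path based on the g_ routine.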
template<int B, bool pack>
static void
fields_device_pack3_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_patches = mflds->nr_patches;
int nr_map;
int *d_map;
if (B == 2) {
nr_map = mflds_cuda->nr_map_out;
d_map = mflds_cuda->d_map_out;
} else if (B == 4) {
nr_map = mflds_cuda->nr_map_in;
d_map = mflds_cuda->d_map_in;
} else {
assert(0);
}
#if 1
dim3 dimGrid((nr_map + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
hipLaunchKernelGGL(( k_fields_device_pack3_yz<pack>) , dim3(dimGrid), dim3(dimBlock), 0, 0,
mflds_cuda->d_bnd_buf, d_flds, d_map, mflds_cuda->d_nei_patch,
im[1], im[2], nr_patches, NR_COMPONENTS, nr_map);
cuda_sync_if_enabled();
#else
thrust::device_ptr<float> d_bnd_buf(mflds_cuda->d_bnd_buf);
thrust::device_ptr<float> d_flds(mflds_cuda->d_flds);
thrust::host_vector<float> h_bnd_buf(d_bnd_buf, d_bnd_buf + nr_patches * nr_ghosts * NR_COMPONENTS);
thrust::host_vector<float> h_flds(d_flds, d_flds + nr_patches * nr_fields * im[1] * im[2]);
for (int tid = 0; tid < nr_map; tid++) {
g_fields_device_pack3_yz<pack>
(tid, &h_bnd_buf[0], &h_flds[mb * im[1] * im[2]], &h_map[0], &h_nei_patch_by_dir1[0],
im[1], im[2], nr_patches, NR_COMPONENTS, nr_map);
}
thrust::copy(h_flds.begin(), h_flds.end(), d_flds);
#endif
}
| 9da488794f8fe410e3732e7170f78baf67316922.cu |
#include "psc_cuda.h"
#include "particles_cuda.h"
#include <mrc_ddc_private.h>
#include <mrc_profile.h>
void
psc_cuda_init(void)
{
static bool first_time = true;
if (!first_time)
return;
first_time = false;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0) {
printf("There is no device supporting CUDA\n");
return;
}
for (int dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
// This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" CUDA Capability Major revision number: %d\n", deviceProp.major);
printf(" CUDA Capability Minor revision number: %d\n", deviceProp.minor);
printf(" Total amount of global memory: %lu bytes\n", deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No");
#endif
#if CUDART_VERSION >= 2020
printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated: %s\n", deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Compute mode: %s\n", deviceProp.computeMode == cudaComputeModeDefault ?
"Default (multiple host threads can use this device simultaneously)" :
deviceProp.computeMode == cudaComputeModeExclusive ?
"Exclusive (only one host thread at a time can use this device)" :
deviceProp.computeMode == cudaComputeModeProhibited ?
"Prohibited (no host thread can use this device)" :
"Unknown");
#endif
}
}
// FIXME, hardcoding is bad, needs to be consistent, etc...
#define MAX_BND_COMPONENTS (3)
EXTERN_C void
__particles_cuda_to_device(struct psc_particles *prts, float4 *xi4, float4 *pxi4)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
struct psc_mparticles *mprts = cuda->mprts;
assert(mprts);
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int off = 0;
for (int p = 0; p < prts->p; p++) {
off += psc_mparticles_get_patch(mprts, p)->n_part;
}
int n_part = prts->n_part;
check(cudaMemcpy(mprts_cuda->d_xi4 + off, xi4, n_part * sizeof(*xi4),
cudaMemcpyHostToDevice));
check(cudaMemcpy(mprts_cuda->d_pxi4 + off, pxi4, n_part * sizeof(*pxi4),
cudaMemcpyHostToDevice));
}
EXTERN_C void
__particles_cuda_from_device(struct psc_particles *prts, float4 *xi4, float4 *pxi4)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
struct psc_mparticles *mprts = cuda->mprts;
assert(mprts);
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int off = 0;
for (int p = 0; p < prts->p; p++) {
off += psc_mparticles_get_patch(mprts, p)->n_part;
}
int n_part = prts->n_part;
check(cudaMemcpy(xi4, mprts_cuda->d_xi4 + off, n_part * sizeof(*xi4),
cudaMemcpyDeviceToHost));
check(cudaMemcpy(pxi4, mprts_cuda->d_pxi4 + off, n_part * sizeof(*pxi4),
cudaMemcpyDeviceToHost));
}
EXTERN_C void
cuda_copy_bidx_from_dev(struct psc_particles *prts, unsigned int *h_bidx, unsigned int *d_bidx)
{
check(cudaMemcpy(h_bidx, d_bidx, prts->n_part * sizeof(*h_bidx),
cudaMemcpyDeviceToHost));
}
EXTERN_C void
cuda_copy_bidx_to_dev(struct psc_particles *prts, unsigned int *d_bidx, unsigned int *h_bidx)
{
check(cudaMemcpy(d_bidx, h_bidx, prts->n_part * sizeof(*d_bidx),
cudaMemcpyHostToDevice));
}
void
__psc_mparticles_cuda_setup(struct psc_mparticles *mprts)
{
psc_cuda_init();
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
if (mprts->nr_patches == 0) {
return;
}
// FIXME we assume that every patch will have those same dims
int *ldims = ppsc->patch[0].ldims;
if (!mprts->flags) {
// FIXME, they get set too late, so auto-dispatch "1vb" doesn't work
mprts->flags = MP_NEED_BLOCK_OFFSETS | MP_BLOCKSIZE_4X4X4 | MP_NO_CHECKERBOARD;
}
int bs[3];
for (int d = 0; d < 3; d++) {
switch (mprts->flags & MP_BLOCKSIZE_MASK) {
case MP_BLOCKSIZE_1X1X1: bs[d] = 1; break;
case MP_BLOCKSIZE_2X2X2: bs[d] = 2; break;
case MP_BLOCKSIZE_4X4X4: bs[d] = 4; break;
case MP_BLOCKSIZE_8X8X8: bs[d] = 8; break;
default: assert(0);
}
if (ppsc->domain.gdims[d] == 1) {
bs[d] = 1;
}
mprts_cuda->blocksize[d] = bs[d];
assert(ldims[d] % bs[d] == 0); // FIXME not sure what breaks if not
mprts_cuda->b_mx[d] = (ldims[d] + bs[d] - 1) / bs[d];
// assumes no AMR
mprts_cuda->b_dxi[d] = 1.f / (mprts_cuda->blocksize[d] * ppsc->patch[0].dx[d]);
}
mprts_cuda->nr_blocks = mprts_cuda->b_mx[0] * mprts_cuda->b_mx[1] * mprts_cuda->b_mx[2];
mprts_cuda->nr_total_blocks = mprts->nr_patches * mprts_cuda->nr_blocks;
mprts_cuda->h_dev = new particles_cuda_dev_t[mprts->nr_patches];
check(cudaMalloc(&mprts_cuda->d_dev,
mprts->nr_patches * sizeof(*mprts_cuda->d_dev)));
mprts_cuda->nr_prts = 0;
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *prts_cuda = psc_particles_cuda(prts);
mprts_cuda->nr_prts += prts->n_part;
prts_cuda->mprts = mprts;
}
mprts_cuda->h_bnd_cnt = new unsigned int[mprts_cuda->nr_total_blocks];
unsigned int nr_alloced = mprts_cuda->nr_prts * 1.2;
mprts_cuda->nr_alloced = nr_alloced;
check(cudaMalloc((void **) &mprts_cuda->d_xi4, nr_alloced * sizeof(float4)));
check(cudaMalloc((void **) &mprts_cuda->d_pxi4, nr_alloced * sizeof(float4)));
check(cudaMalloc((void **) &mprts_cuda->d_alt_xi4, nr_alloced * sizeof(float4)));
check(cudaMalloc((void **) &mprts_cuda->d_alt_pxi4, nr_alloced * sizeof(float4)));
check(cudaMalloc((void **) &mprts_cuda->d_bidx, nr_alloced * sizeof(unsigned int)));
check(cudaMalloc((void **) &mprts_cuda->d_alt_bidx, nr_alloced * sizeof(unsigned int)));
check(cudaMalloc((void **) &mprts_cuda->d_ids, nr_alloced * sizeof(unsigned int)));
check(cudaMalloc((void **) &mprts_cuda->d_sums, nr_alloced * sizeof(unsigned int)));
check(cudaMalloc((void **) &mprts_cuda->d_off,
(mprts_cuda->nr_total_blocks + 1) * sizeof(*mprts_cuda->d_off)));
check(cudaMalloc((void **) &mprts_cuda->d_bnd_spine_cnts,
(1 + mprts_cuda->nr_total_blocks * (CUDA_BND_STRIDE + 1)) * sizeof(unsigned int)));
check(cudaMalloc((void **) &mprts_cuda->d_bnd_spine_sums,
(1 + mprts_cuda->nr_total_blocks * (CUDA_BND_STRIDE + 1)) * sizeof(unsigned int)));
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *prts_cuda = psc_particles_cuda(prts);
prts_cuda->h_dev = &mprts_cuda->h_dev[p];
prts_cuda->d_dev = &mprts_cuda->d_dev[p];
}
}
void
__psc_mparticles_cuda_free(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
delete[] mprts_cuda->h_dev;
delete[] mprts_cuda->h_bnd_cnt;
check(cudaFree(mprts_cuda->d_xi4));
check(cudaFree(mprts_cuda->d_pxi4));
check(cudaFree(mprts_cuda->d_alt_xi4));
check(cudaFree(mprts_cuda->d_alt_pxi4));
check(cudaFree(mprts_cuda->d_bidx));
check(cudaFree(mprts_cuda->d_alt_bidx));
check(cudaFree(mprts_cuda->d_ids));
check(cudaFree(mprts_cuda->d_sums));
check(cudaFree(mprts_cuda->d_off));
check(cudaFree(mprts_cuda->d_bnd_spine_cnts));
check(cudaFree(mprts_cuda->d_bnd_spine_sums));
check(cudaFree(mprts_cuda->d_dev));
}
// ======================================================================
// ======================================================================
// fields
void
__psc_mfields_cuda_setup(struct psc_mfields *mflds)
{
assert(!ppsc->domain.use_pml);
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
unsigned int total_size = 0;
unsigned int buf_size = 0;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
if (p == 0) {
for (int d = 0; d < 3; d++) {
mflds_cuda->im[d] = flds->im[d];
mflds_cuda->ib[d] = flds->ib[d];
}
} else {
for (int d = 0; d < 3; d++) {
assert(mflds_cuda->im[d] == flds->im[d]);
assert(mflds_cuda->ib[d] == flds->ib[d]);
}
}
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
total_size += size;
if (flds->im[0] == 1) {;// + 2*BND) {
int B = 2*BND;
buf_size = 2*B * (flds->im[1] + flds->im[2] - 2*B);
} else {
assert(0);
}
}
mprintf("nr_fields %d tsize %d\n", mflds->nr_fields, total_size);
check(cudaMalloc((void **) &mflds_cuda->d_flds,
mflds->nr_fields * total_size * sizeof(float)));
check(cudaMalloc((void **) &mflds_cuda->d_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches * sizeof(float)));
mflds_cuda->h_bnd_buf = new float[MAX_BND_COMPONENTS * mflds->nr_patches * buf_size];
float *d_flds = mflds_cuda->d_flds;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
assert(psc_fields_ops(flds) == &psc_fields_cuda_ops);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
flds_cuda->d_flds = d_flds;
assert(d_flds == mflds_cuda->d_flds + p * flds->nr_comp * size);
d_flds += flds->nr_comp * size;
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
int sz = 1;
for (int d = 0; d < 3; d++) {
if (flds->im[d] == 1 - 2 * flds->ib[d]) { // only 1 non-ghost point
cf->im[d] = 1;
cf->ib[d] = 0;
} else {
cf->im[d] = flds->im[d];
cf->ib[d] = flds->ib[d];
}
sz *= cf->im[d];
}
cf->arr = new float [MAX_BND_COMPONENTS * sz];
cf->arr_off = cf->arr
- ((cf->ib[2] * cf->im[1] + cf->ib[1]) * cf->im[0] + cf->ib[0]);
}
}
void
__psc_mfields_cuda_destroy(struct psc_mfields *mflds)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
check(cudaFree(mflds_cuda->d_flds));
check(cudaFree(mflds_cuda->d_bnd_buf));
check(cudaFree(mflds_cuda->d_nei_patch));
check(cudaFree(mflds_cuda->d_map_out));
check(cudaFree(mflds_cuda->d_map_in));
delete[] mflds_cuda->h_bnd_buf;
delete[] mflds_cuda->h_nei_patch;
delete[] mflds_cuda->h_map_out;
delete[] mflds_cuda->h_map_in;
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
delete[] cf->arr;
}
}
EXTERN_C void
__fields_cuda_to_device(struct psc_fields *pf, real *h_flds, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
unsigned int size = pf->im[0] * pf->im[1] * pf->im[2];
check(cudaMemcpy(pfc->d_flds + mb * size,
h_flds + mb * size,
(me - mb) * size * sizeof(float),
cudaMemcpyHostToDevice));
}
EXTERN_C void
__fields_cuda_from_device(struct psc_fields *pf, real *h_flds, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
unsigned int size = pf->im[0] * pf->im[1] * pf->im[2];
check(cudaMemcpy(h_flds + mb * size,
pfc->d_flds + mb * size,
(me - mb) * size * sizeof(float),
cudaMemcpyDeviceToHost));
}
// ======================================================================
enum {
PACK,
UNPACK,
};
// ======================================================================
// fields_device_pack
// FIXME/OPT: can probably be accelerated by making component the fast index
template<int B, int WHAT, int NR_COMPONENTS>
__global__ static void
k_fields_device_pack_yz(real *d_buf, real *d_flds, int gmy, int gmz,
int nr_patches, int nr_fields)
{
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
int gmx = 1;//2*BND + 1;
int jx = 0;//BND;
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int n_threads = NR_COMPONENTS * buf_size;
int p = tid / n_threads;
if (p >= nr_patches)
return;
int n = tid - p * n_threads;
int m = n / buf_size; n -= m * buf_size;
int jz, jy;
if (n < B * gmy) {
jz = n / gmy; n -= jz * gmy;
jy = n;
} else if (n < B * gmy + (gmz - 2*B) * 2*B) {
n -= B * gmy;
jz = n / (2*B); n -= jz * 2*B;
if (n < B) {
jy = n;
} else {
jy = n + gmy - 2*B;
}
jz += B;
} else {
n -= B * gmy + (gmz - 2*B) * 2*B;
jz = n / gmy; n -= jz * gmy;
jy = n;
jz += gmz - B;
}
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[tid] = d_flds[(((p * nr_fields + m) * gmz + jz) * gmy + jy) * gmx + jx];
} else if (WHAT == UNPACK) {
d_flds[(((p * nr_fields + m) * gmz + jz) * gmy + jy) * gmx + jx] = d_buf[tid];
}
}
template<int B, int WHAT, int NR_COMPONENTS>
__global__ static void
k_fields_device_pack2_yz(real *d_buf, real *d_flds, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_fields)
{
unsigned int nr_ghosts = 2*B * (gmy + gmz - 2*B);
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int p = tid / nr_ghosts;
if (p >= nr_patches)
return;
int n = tid - p * nr_ghosts;
int jy, jz;
int diry, dirz;
if (n < 2*B * gmy) {
jz = n / gmy;
jy = n - jz * gmy;
if (jy < B) {
diry = -1;
} else if (jy < gmy - B) {
diry = 0;
} else {
diry = 1;
}
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += gmz - 2*B;
}
} else {
n -= 2*B * gmy;
jz = n / (2*B) + B;
jy = n % (2*B);
dirz = 0;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += gmy - 2*B;
}
}
int s_p = d_nei_patch_by_dir1[p*9 + 3*dirz + diry + 4];
// copy only ghost areas that interface with remote patches
if (1||s_p < 0) {
for (int m = 0; m < NR_COMPONENTS; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_ghosts * nr_patches + tid] = d_flds[((p * nr_fields + m) * gmz + jz) * gmy + jy];
} else if (WHAT == UNPACK) {
d_flds[((p * nr_fields + m) * gmz + jz) * gmy + jy] = d_buf[m * nr_ghosts * nr_patches + tid];
}
}
}
}
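// k_fill_ghosts_local_yz
//
// One thread per ghost cell: if the receiving patch r_p has a local neighbor
// s_p (d_nei_patch_by_dir1 >= 0) in the ghost cell's y-z direction, the
// corresponding interior value is copied from the neighbor patch directly on
// the device, for all NR_COMPONENTS components.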
template<int B, int NR_COMPONENTS>
__global__ static void
k_fill_ghosts_local_yz(float *d_flds, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_fields, int nr_patches)
{
int nr_ghosts = 2*B * (gmy + gmz - 2*B);
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
int r_p = tid / nr_ghosts;
if (r_p >= nr_patches)
return;
int n = tid - r_p * nr_ghosts;
int jy, jz;
int diry, dirz;
if (n < 2*B * gmy) {
jz = n / gmy;
jy = n - jz * gmy;
if (jy < B) {
diry = -1;
} else if (jy < gmy - B) {
diry = 0;
} else {
diry = 1;
}
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += gmz - 2*B;
}
} else {
n -= 2*B * gmy;
jz = n / (2*B) + B;
jy = n % (2*B);
dirz = 0;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += gmy - 2*B;
}
}
int s_p = d_nei_patch_by_dir1[r_p*9 + 3*dirz + diry + 4];
if (s_p >= 0) {
float *r_f = &d_flds[((r_p * nr_fields) * gmz) * gmy];
float *s_f = &d_flds[((s_p * nr_fields)
* gmz - dirz * (gmz - 2*2))
* gmy - diry * (gmy - 2*2)];
for (int m = 0; m < NR_COMPONENTS; m++) {
int i = (m * gmz + jz) * gmy + jy;
r_f[i] = s_f[i];
}
}
}
template<int B, bool pack>
static void
fields_device_pack_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
unsigned int size = mflds_cuda->im[0] * mflds_cuda->im[1] * mflds_cuda->im[2];
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
int n_threads = buf_size * (me - mb) * mflds->nr_patches;
dim3 dimGrid((n_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
float *d_bnd_buf = mflds_cuda->d_bnd_buf;
float *d_flds = mflds_cuda->d_flds + mb * size;
k_fields_device_pack_yz<B, pack, NR_COMPONENTS> <<<dimGrid, dimBlock>>>
(d_bnd_buf, d_flds, gmy, gmz, mflds->nr_patches,
mflds->nr_fields);
cuda_sync_if_enabled();
}
template<int B, bool pack>
static void
fields_device_pack2_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_fields = mflds->nr_fields;
int nr_patches = mflds->nr_patches;
int nr_ghosts = 2*B * (im[1] + im[2] - 2*B);
int nr_threads = nr_ghosts * nr_patches;
dim3 dimGrid((nr_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
k_fields_device_pack2_yz<B, pack, NR_COMPONENTS> <<<dimGrid, dimBlock>>>
(mflds_cuda->d_bnd_buf, d_flds, mflds_cuda->d_nei_patch,
im[1], im[2], nr_patches, nr_fields);
cuda_sync_if_enabled();
}
EXTERN_C void
__fields_cuda_fill_ghosts_local(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int B = 2;
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_fields = mflds->nr_fields;
int nr_patches = mflds->nr_patches;
int nr_ghosts = 2*B * (im[1] + im[2] - 2*B);
int nr_threads = nr_ghosts * nr_patches;
dim3 dimGrid((nr_threads + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
k_fill_ghosts_local_yz<B, NR_COMPONENTS> <<<dimGrid, dimBlock>>>
(d_flds, mflds_cuda->d_nei_patch, im[1], im[2],
nr_fields, nr_patches);
cuda_sync_if_enabled();
#if 0
thrust::device_ptr<float> d_flds(mflds_cuda->d_flds);
thrust::host_vector<float> h_flds(d_flds, d_flds + nr_patches * nr_fields * im[2] *im[2]);
for (int tid = 0; tid < nr_threads; tid++) {
cuda_fill_ghosts_local_gold(&h_flds[0], nei_patch_by_dir1, mb, me, im, nr_fields,
nr_patches, nr_ghosts, tid);
}
thrust::copy(h_flds.begin(), h_flds.end(), d_flds);
#endif
}
// ======================================================================
// fields_host_pack
#define WHAT do { \
if (what == PACK) { \
h_buf[tid++] = F3_CF_0(cf, m, 0,jy,jz); \
} else if (what == UNPACK) { \
F3_CF_0(cf, m, 0,jy,jz) = h_buf[tid++]; \
} \
} while(0)
template<int B, int what>
static void
fields_host_pack_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
real *h_buf = mflds_cuda->h_bnd_buf + p * buf_size * MAX_BND_COMPONENTS;
int gmy = cf->im[1], gmz = cf->im[2];
int tid = 0;
for (int m = 0; m < me - mb; m++) {
for (int jz = 0; jz < B; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = B; jz < gmz - B; jz++) {
for (int jy = 0; jy < B; jy++) {
WHAT;
}
for (int jy = gmy - B; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = gmz - B; jz < gmz; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
}
}
}
template<int B, int what>
static void
fields_host_pack2_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
real *h_buf = mflds_cuda->h_bnd_buf;
int tid = 0;
for (int m = 0; m < me - mb; m++) {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
int gmy = cf->im[1], gmz = cf->im[2];
for (int jz = 0; jz < B; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = gmz - B; jz < gmz; jz++) {
for (int jy = 0; jy < gmy; jy++) {
WHAT;
}
}
for (int jz = B; jz < gmz - B; jz++) {
for (int jy = 0; jy < B; jy++) {
WHAT;
}
for (int jy = gmy - B; jy < gmy; jy++) {
WHAT;
}
}
}
}
}
#undef WHAT
template<int B, int what>
static void
fields_host_pack3_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int *im = mflds_cuda->im;
int nr_fields = mflds->nr_fields;
real *h_buf = mflds_cuda->h_bnd_buf;
int nr_map;
int *h_map;
if (B == 2) {
h_map = mflds_cuda->h_map_out;
nr_map = mflds_cuda->nr_map_out;
} else if (B == 4) {
h_map = mflds_cuda->h_map_in;
nr_map = mflds_cuda->nr_map_in;
} else {
assert(0);
}
for (int tid = 0; tid < nr_map; tid++) {
int i = h_map[tid];
int p = i / (nr_fields * im[2] * im[1]);
int off = i - p * (nr_fields * im[2] * im[1]);
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
struct psc_fields_cuda_bnd *cf = &flds_cuda->bnd;
for (int m = 0; m < me - mb; m++) {
if (what == PACK) {
h_buf[tid + m * nr_map] = cf->arr[off + m * im[2] * im[1]];
} else if (what == UNPACK) {
cf->arr[off + m * im[2] * im[1]] = h_buf[tid + m * nr_map];
}
}
}
}
template<int B>
static void
__fields_cuda_from_device_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_device_pack", 1., 0, 0);
pr2 = prof_register("cuda_memcpy", 1., 0, 0);
pr3 = prof_register("field_host_unpack", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
prof_start(pr1);
fields_device_pack_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(cudaMemcpy(mflds_cuda->h_bnd_buf, mflds_cuda->d_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches *
sizeof(*mflds_cuda->h_bnd_buf),
cudaMemcpyDeviceToHost));
prof_stop(pr2);
prof_start(pr3);
fields_host_pack_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B>
static void
__fields_cuda_to_device_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_host_pack", 1., 0, 0);
pr2 = prof_register("cuda_memcpy", 1., 0, 0);
pr3 = prof_register("field_device_unpack", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int gmy = mflds_cuda->im[1], gmz = mflds_cuda->im[2];
unsigned int buf_size = 2*B * (gmy + gmz - 2*B);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
prof_start(pr1);
fields_host_pack_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(cudaMemcpy(mflds_cuda->d_bnd_buf, mflds_cuda->h_bnd_buf,
MAX_BND_COMPONENTS * buf_size * mflds->nr_patches *
sizeof(*mflds_cuda->d_bnd_buf),
cudaMemcpyHostToDevice));
prof_stop(pr2);
prof_start(pr3);
fields_device_pack_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B, bool pack>
static void fields_device_pack3_yz(struct psc_mfields *mflds, int mb, int me);
template<int B>
static void
__fields_cuda_from_device3_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_device_pack 3i", 1., 0, 0);
pr2 = prof_register("cuda_memcpy 3i", 1., 0, 0);
pr3 = prof_register("field_host_unpack 3i", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
int nr_map;
if (B == 4) {
nr_map = mflds_cuda->nr_map_in;
} else {
assert(0);
}
prof_start(pr1);
fields_device_pack3_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
assert(B == 4);
check(cudaMemcpy(mflds_cuda->h_bnd_buf, mflds_cuda->d_bnd_buf,
(me - mb) * nr_map * sizeof(*mflds_cuda->h_bnd_buf),
cudaMemcpyDeviceToHost));
prof_stop(pr2);
prof_start(pr3);
fields_host_pack3_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
template<int B>
static void
__fields_cuda_to_device3_yz(struct psc_mfields *mflds, int mb, int me)
{
static int pr1, pr2, pr3;
if (!pr1) {
pr1 = prof_register("field_host_pack 3o", 1., 0, 0);
pr2 = prof_register("cuda_memcpy 3o", 1., 0, 0);
pr3 = prof_register("field_device_unpack 3o", 1., 0, 0);
}
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
assert(me - mb <= MAX_BND_COMPONENTS);
assert(mflds_cuda->ib[1] == -BND);
assert(mflds_cuda->im[1] >= 2 * B);
assert(mflds_cuda->im[2] >= 2 * B);
int nr_map;
if (B == 2) {
nr_map = mflds_cuda->nr_map_out;
} else {
assert(0);
}
prof_start(pr1);
fields_host_pack3_yz<B, PACK>(mflds, mb, me);
prof_stop(pr1);
prof_start(pr2);
check(cudaMemcpy(mflds_cuda->d_bnd_buf, mflds_cuda->h_bnd_buf,
(me - mb) * nr_map * sizeof(*mflds_cuda->d_bnd_buf),
cudaMemcpyHostToDevice));
prof_stop(pr2);
prof_start(pr3);
fields_device_pack3_yz<B, UNPACK>(mflds, mb, me);
prof_stop(pr3);
}
// ======================================================================
EXTERN_C void
__fields_cuda_from_device_inside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_from_device_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(cudaMemcpy(flds_cuda->bnd.arr,
flds_cuda->d_flds + mb * size,
(me - mb) * size * sizeof(float),
cudaMemcpyDeviceToHost));
}
}
}
EXTERN_C void
__fields_cuda_from_device_inside_only(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_from_device3_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(cudaMemcpy(flds_cuda->bnd.arr,
flds_cuda->d_flds + mb * size,
(me - mb) * size * sizeof(float),
cudaMemcpyDeviceToHost));
}
}
}
EXTERN_C void
__fields_cuda_to_device_outside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_to_device3_yz<BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(cudaMemcpy(flds_cuda->d_flds + mb * size,
flds_cuda->bnd.arr,
(me - mb) * size * sizeof(float),
cudaMemcpyHostToDevice));
}
}
}
EXTERN_C void
__fields_cuda_to_device_inside(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
if (mflds_cuda->im[0] == 2 * -mflds_cuda->ib[0] + 1) {
__fields_cuda_to_device_yz<2*BND>(mflds, mb, me);
} else {
for (int p = 0; p < mflds->nr_patches; p++) {
struct psc_fields *flds = psc_mfields_get_patch(mflds, p);
struct psc_fields_cuda *flds_cuda = psc_fields_cuda(flds);
unsigned int size = flds->im[0] * flds->im[1] * flds->im[2];
check(cudaMemcpy(flds_cuda->d_flds + mb * size,
flds_cuda->bnd.arr,
(me - mb) * size * sizeof(float),
cudaMemcpyHostToDevice));
}
}
}
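// Host reference ("gold") implementation of the local ghost fill, mirroring
// k_fill_ghosts_local_yz one thread index at a time; used by the disabled
// verification path in __fields_cuda_fill_ghosts_local above.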
void
cuda_fill_ghosts_local_gold(float *h_flds, int *nei_patch_by_dir1, int mb, int me,
int *im, int nr_fields, int nr_patches, int nr_ghosts, int threadIdx)
{
int iy, iz;
int diry, dirz;
int r_p = threadIdx / nr_ghosts;
if (r_p >= nr_patches)
return;
int tid = threadIdx - nr_ghosts * r_p;
if (tid < 4 * im[1]) {
iy = tid % im[1];
iz = tid / im[1];
if (iy < 2) {
diry = -1;
} else if (iy < im[1] - 2) {
diry = 0;
} else {
diry = 1;
}
if (iz < 2) {
dirz = -1;
} else {
dirz = 1;
iz += im[2] - 2*2;
}
} else {
int tid2 = tid - 4 * im[1];
iy = tid2 % 4;
iz = tid2 / 4 + 2;
dirz = 0;
if (iy < 2) {
diry = -1;
} else {
diry = 1;
iy += im[1] - 2*2;
}
}
int s_p = nei_patch_by_dir1[r_p*9 + 3*dirz + diry + 4];
if (s_p >= 0) {
float *r_f = &h_flds[((r_p * nr_fields + mb) * im[2]) * im[1]];
float *s_f = &h_flds[((s_p * nr_fields + mb)
* im[2] - dirz * (im[2] - 2*2))
* im[1] - diry * (im[1] - 2*2)];
for (int m = 0; m < me - mb; m++) {
int i = (m * im[2] + iz) * im[1] + iy;
r_f[i] = s_f[i];
}
}
}
static void fields_create_map_out_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map);
static void fields_create_map_in_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map);
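// One-time setup for the y-z ghost exchange: builds the 9-slot-per-patch
// neighbor table (h_nei_patch / d_nei_patch) from the mrc_ddc fill_ghosts2
// receive pattern, then creates and uploads the "out" (ghost-layer) and
// "in" (interior-boundary) cell maps used by the pack/unpack routines.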
EXTERN_C void
__fields_cuda_fill_ghosts_setup(struct psc_mfields *mflds, struct mrc_ddc *ddc)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_patches = mflds->nr_patches;
if (!mflds_cuda->h_nei_patch) {
struct mrc_ddc_multi *multi = to_mrc_ddc_multi(ddc);
struct mrc_ddc_pattern2 *patt2 = &multi->fill_ghosts2;
struct mrc_ddc_rank_info *ri = patt2->ri;
mflds_cuda->h_nei_patch = new int[9 * nr_patches];
for (int p = 0; p < nr_patches; p++) {
for (int dir1 = 0; dir1 < 9; dir1++) {
mflds_cuda->h_nei_patch[p * 9 + dir1] = -1;
}
}
for (int i = 0; i < ri[multi->mpi_rank].n_recv_entries; i++) {
struct mrc_ddc_sendrecv_entry *re = &ri[multi->mpi_rank].recv_entry[i];
mflds_cuda->h_nei_patch[re->patch * 9 + re->dir1 / 3] = re->nei_patch;
}
check(cudaMalloc((void **) &mflds_cuda->d_nei_patch,
9 * nr_patches * sizeof(*mflds_cuda->d_nei_patch)));
check(cudaMemcpy(mflds_cuda->d_nei_patch, mflds_cuda->h_nei_patch,
9 * nr_patches * sizeof(*mflds_cuda->d_nei_patch),
cudaMemcpyHostToDevice));
fields_create_map_out_yz(mflds, 2, &mflds_cuda->nr_map_out, &mflds_cuda->h_map_out);
check(cudaMalloc((void **) &mflds_cuda->d_map_out,
mflds_cuda->nr_map_out * sizeof(*mflds_cuda->d_map_out)));
check(cudaMemcpy(mflds_cuda->d_map_out, mflds_cuda->h_map_out,
mflds_cuda->nr_map_out * sizeof(*mflds_cuda->d_map_out),
cudaMemcpyHostToDevice));
fields_create_map_in_yz(mflds, 2, &mflds_cuda->nr_map_in, &mflds_cuda->h_map_in);
mprintf("map_out %d\n", mflds_cuda->nr_map_out);
mprintf("map_in %d\n", mflds_cuda->nr_map_in);
check(cudaMalloc((void **) &mflds_cuda->d_map_in,
mflds_cuda->nr_map_in * sizeof(*mflds_cuda->d_map_in)));
check(cudaMemcpy(mflds_cuda->d_map_in, mflds_cuda->h_map_in,
mflds_cuda->nr_map_in * sizeof(*mflds_cuda->d_map_in),
cudaMemcpyHostToDevice));
}
}
template<int WHAT>
static void
g_fields_device_pack3_yz(int tid, real *d_buf, real *d_flds, int *d_map, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_components, int nr_map)
{
if (tid >= nr_map)
return;
// copy only ghost areas that interface with remote patches
int i = d_map[tid];
for (int m = 0; m < nr_components; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_map + tid] = d_flds[i + m * gmz * gmy];
} else if (WHAT == UNPACK) {
d_flds[i + m * gmz * gmy] = d_buf[m * nr_map + tid];
}
}
}
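// Device version of the routine above: each thread handles one entry of
// d_map and packs (or unpacks) that cell for all nr_components components
// between d_flds and the boundary buffer d_buf.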
template<int WHAT>
__global__ static void
k_fields_device_pack3_yz(real *d_buf, real *d_flds, int *d_map, int *d_nei_patch_by_dir1,
int gmy, int gmz, int nr_patches, int nr_components, int nr_map)
{
int tid = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if (tid >= nr_map)
return;
// copy only ghost areas that interface with remote patches
int i = d_map[tid];
for (int m = 0; m < nr_components; m++) {
// FIXME, should use F3_DEV_YZ
if (WHAT == PACK) {
d_buf[m * nr_map + tid] = d_flds[i + m * gmz * gmy];
} else if (WHAT == UNPACK) {
d_flds[i + m * gmz * gmy] = d_buf[m * nr_map + tid];
}
}
}
#undef check
#undef _GLIBCXX_USE_INT128
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
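// fields_create_map_out_yz
//
// Builds a host-side map (p_h_map) of the outermost B cell layers (the ghost
// region) of each patch in the y-z plane, keeping only cells in directions
// that have no local neighbor patch (h_nei_patch < 0) and therefore need a
// remote boundary exchange.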
static void
fields_create_map_out_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
bool remote_only = true;
int *im = mflds_cuda->im;
int nr_patches = mflds->nr_patches;
int nr_fields = mflds->nr_fields;
int nr_map = 0;
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
if (dir[1] == 0 && dir[2] == 0) {
continue;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dir[2] + dir[1] + 4];
if (!remote_only || s_p < 0) {
nr_map += ((dir[1] == 0 ? im[1] - 2*B : B) *
(dir[2] == 0 ? im[2] - 2*B : B));
}
}
}
}
*p_nr_map = nr_map;
*p_h_map = new int[nr_map];
int tid = 0;
for (int p = 0; p < nr_patches; p++) {
for (int jjz = 0; jjz < 2*B; jjz++) {
int jz = jjz;
int dirz;
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += im[2] - 2*B;
}
for (int jy = 0; jy < im[1]; jy++) {
int diry;
if (jy < B) {
diry = -1;
} else if (jy < im[1] - B) {
diry = 0;
} else {
diry = 1;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dirz + diry + 4];
if (!remote_only || s_p < 0) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + jz) * im[1] + jy;
}
}
}
for (int jz = B; jz < im[2] - B; jz++) {
int dirz = 0;
for (int jjy = 0; jjy < 2*B; jjy++) {
int jy = jjy;
int diry;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += im[1] - 2*B;
}
int s_p = mflds_cuda->h_nei_patch[p*9 + 3*dirz + diry + 4];
if (!remote_only || s_p < 0) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + jz) * im[1] + jy;
}
}
}
}
assert(tid == nr_map);
}
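// fields_create_map_in_yz
//
// Same idea as fields_create_map_out_yz, but for the B-wide cell strips just
// inside the non-ghost domain (ldims), written as indices into the
// ghost-inclusive im[] layout (hence the +B offsets). has_nei[] marks, per
// patch and y-z direction, whether any required neighbor slot in h_nei_patch
// is missing (< 0); only cells in those directions are added to the map.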
static void
fields_create_map_in_yz(struct psc_mfields *mflds, int B, int *p_nr_map, int **p_h_map)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
bool remote_only = true;
int *im = mflds_cuda->im;
int ldims[3] = { 1, im[1] - 2*2, im[2] - 2*2 };
int nr_patches = mflds->nr_patches;
int nr_fields = mflds->nr_fields;
bool *has_nei = new bool[9 * nr_patches];
// FIXME, we don't need the ghosts here...
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
int dir1 = p*9 + 3*dir[2] + dir[1] + 4;
has_nei[dir1] = false;
int p_mm = mflds_cuda->h_nei_patch[p*9 + -1 + 3*-1 + 4];
int p_0m = mflds_cuda->h_nei_patch[p*9 + 0 + 3*-1 + 4];
int p_pm = mflds_cuda->h_nei_patch[p*9 + +1 + 3*-1 + 4];
int p_m0 = mflds_cuda->h_nei_patch[p*9 + -1 + 3* 0 + 4];
int p_p0 = mflds_cuda->h_nei_patch[p*9 + +1 + 3* 0 + 4];
int p_mp = mflds_cuda->h_nei_patch[p*9 + +1 + 3*+1 + 4];
int p_0p = mflds_cuda->h_nei_patch[p*9 + 0 + 3*+1 + 4];
int p_pp = mflds_cuda->h_nei_patch[p*9 + -1 + 3*+1 + 4];
if (dir[1] == 0 && dir[2] == 0) {
} else if (dir[1] == -1 && dir[2] == -1) {
if (p_mm < 0 || p_m0 < 0 || p_0m < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == 0 && dir[2] == -1) {
if (p_0m < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == -1) {
if (p_pm < 0 || p_0m < 0 || p_p0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == -1 && dir[2] == 0) {
if (p_m0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == 0) {
if (p_p0 < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == -1 && dir[2] == +1) {
if (p_mp < 0 || p_m0 < 0 || p_0p < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == 0 && dir[2] == +1) {
if (p_0p < 0) {
has_nei[dir1] = true;
}
} else if (dir[1] == +1 && dir[2] == +1) {
if (p_pp < 0 || p_0p < 0 || p_p0 < 0) {
has_nei[dir1] = true;
}
}
}
}
}
int nr_map = 0;
for (int p = 0; p < nr_patches; p++) {
int dir[3];
for (dir[2] = -1; dir[2] <= 1; dir[2]++) {
for (dir[1] = -1; dir[1] <= 1; dir[1]++) {
if (dir[1] == 0 && dir[2] == 0) {
continue;
}
int dir1 = p*9 + 3*dir[2] + dir[1] + 4;
if (!remote_only || has_nei[dir1]) {
nr_map += ((dir[1] == 0 ? ldims[1] - 2*B : B) *
(dir[2] == 0 ? ldims[2] - 2*B : B));
}
}
}
}
*p_nr_map = nr_map;
*p_h_map = new int[nr_map];
int tid = 0;
for (int p = 0; p < nr_patches; p++) {
for (int jjz = 0; jjz < 2*B; jjz++) {
int jz = jjz;
int dirz;
if (jz < B) {
dirz = -1;
} else {
dirz = 1;
jz += ldims[2] - 2*B;
}
for (int jy = 0; jy < ldims[1]; jy++) {
int diry;
if (jy < B) {
diry = -1;
} else if (jy < ldims[1] - B) {
diry = 0;
} else {
diry = 1;
}
int dir1 = p*9 + 3*dirz + diry + 4;
if (!remote_only || has_nei[dir1]) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + (jz + B)) * im[1] + (jy + B);
}
}
}
for (int jz = B; jz < ldims[2] - B; jz++) {
int dirz = 0;
for (int jjy = 0; jjy < 2*B; jjy++) {
int jy = jjy;
int diry;
if (jy < B) {
diry = -1;
} else {
diry = 1;
jy += ldims[1] - 2*B;
}
int dir1 = p*9 + 3*dirz + diry + 4;
if (!remote_only || has_nei[dir1]) {
(*p_h_map)[tid++] = ((p * nr_fields + 0) * im[2] + (jz + B)) * im[1] + (jy + B);
}
}
}
}
assert(tid == nr_map);
delete[] has_nei;
}
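// fields_device_pack3_yz
//
// Packs or unpacks (template parameter 'pack' = PACK / UNPACK) the mapped
// boundary cells for the 3 components starting at mb, using the cell map
// selected by B: the "out" map for B == 2, the "in" map for B == 4. The
// disabled #else branch is a host-side debug path based on the g_ routine.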
template<int B, bool pack>
static void
fields_device_pack3_yz(struct psc_mfields *mflds, int mb, int me)
{
struct psc_mfields_cuda *mflds_cuda = psc_mfields_cuda(mflds);
const int NR_COMPONENTS = 3;
assert(me - mb == NR_COMPONENTS);
int *im = mflds_cuda->im;
assert(im[0] == 1);
int nr_patches = mflds->nr_patches;
int nr_map;
int *d_map;
if (B == 2) {
nr_map = mflds_cuda->nr_map_out;
d_map = mflds_cuda->d_map_out;
} else if (B == 4) {
nr_map = mflds_cuda->nr_map_in;
d_map = mflds_cuda->d_map_in;
} else {
assert(0);
}
#if 1
dim3 dimGrid((nr_map + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
float *d_flds = mflds_cuda->d_flds + mb * im[1] * im[2];
k_fields_device_pack3_yz<pack> <<<dimGrid, dimBlock>>>
(mflds_cuda->d_bnd_buf, d_flds, d_map, mflds_cuda->d_nei_patch,
im[1], im[2], nr_patches, NR_COMPONENTS, nr_map);
cuda_sync_if_enabled();
#else
thrust::device_ptr<float> d_bnd_buf(mflds_cuda->d_bnd_buf);
thrust::device_ptr<float> d_flds(mflds_cuda->d_flds);
thrust::host_vector<float> h_bnd_buf(d_bnd_buf, d_bnd_buf + nr_patches * nr_ghosts * NR_COMPONENTS);
thrust::host_vector<float> h_flds(d_flds, d_flds + nr_patches * nr_fields * im[1] * im[2]);
for (int tid = 0; tid < nr_map; tid++) {
g_fields_device_pack3_yz<pack>
(tid, &h_bnd_buf[0], &h_flds[mb * im[1] * im[2]], &h_map[0], &h_nei_patch_by_dir1[0],
im[1], im[2], nr_patches, NR_COMPONENTS, nr_map);
}
thrust::copy(h_flds.begin(), h_flds.end(), d_flds);
#endif
}
|
7f3594f07f4f4f7283ad2c61b131a4287756360a.hip | // !!! This is a file automatically generated by hipify!!!
#include <DotMultiplicationModel.h>
#include <kernels.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <Constants.h>
#include <stdio.h>
DotMultiplicationModel::DotMultiplicationModel(int CPUCores): ComputationalModel(CPUCores) {}
DotMultiplicationModel::~DotMultiplicationModel() {}
void DotMultiplicationModel::CPUImplementation(){
printf("Hello CPU IMPL \n");
for (int i = 0; i < localL; i++)
{
//printf("\n%d\n%d\n", localA[i], localB[i]);
*localC += localA[i] * localB[i];
}
}
void DotMultiplicationModel::GPUImplementation(){
printf("Hello GPU IMPL \n");
// Allocate memory for arrays d_A, d_B, and d_result on device
int* d_A, * d_B;
int * d_result;
size_t bytes = localL * sizeof(int);
hipMalloc(&d_A, bytes);
hipMalloc(&d_B, bytes);
hipMalloc(&d_result, sizeof(int));
// Copy data from host arrays A and B to device arrays d_A and d_B
hipMemcpy(d_A, localA, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, localB, bytes, hipMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = THREADS_PER_BLOCK;
int blk_in_grid = ceil(float(N) / thr_per_blk);
// Launch kernel
hipLaunchKernelGGL(( dot_product) , dim3(blk_in_grid), dim3(thr_per_blk) , 0, 0, d_A, d_B, d_result);
printf("%d", d_result);
// copy back to host
hipMemcpy(localC, d_result, sizeof(int), hipMemcpyDeviceToHost);
// Free GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_result);
}
| 7f3594f07f4f4f7283ad2c61b131a4287756360a.cu | #include <DotMultiplicationModel.h>
#include <kernels.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <Constants.h>
#include <stdio.h>
DotMultiplicationModel::DotMultiplicationModel(int CPUCores): ComputationalModel(CPUCores) {}
DotMultiplicationModel::~DotMultiplicationModel() {}
void DotMultiplicationModel::CPUImplementation(){
printf("Hello CPU IMPL \n");
for (int i = 0; i < localL; i++)
{
//printf("\n%d\n%d\n", localA[i], localB[i]);
*localC += localA[i] * localB[i];
}
}
void DotMultiplicationModel::GPUImplementation(){
printf("Hello GPU IMPL \n");
// Allocate memory for arrays d_A, d_B, and d_result on device
int* d_A, * d_B;
int * d_result;
size_t bytes = localL * sizeof(int);
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_B, bytes);
cudaMalloc(&d_result, sizeof(int));
// Copy data from host arrays A and B to device arrays d_A and d_B
cudaMemcpy(d_A, localA, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, localB, bytes, cudaMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = THREADS_PER_BLOCK;
int blk_in_grid = ceil(float(N) / thr_per_blk);
// Launch kernel
dot_product <<< blk_in_grid, thr_per_blk >>> (d_A, d_B, d_result);
printf("%d", d_result);
// copy back to host
cudaMemcpy(localC, d_result, sizeof(int), cudaMemcpyDeviceToHost);
// Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_result);
}
|
c456290212a73e01239f2ea273f4c72a88319854.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
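// Demo that deliberately triggers an unaligned write (unaligned_kernel) and
// an out-of-bounds write (out_of_bounds_kernel); meant to be run under a GPU
// memory checker such as cuda-memcheck / compute-sanitizer.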
__device__ int x;
__global__ void unaligned_kernel()
{
*(int*) ((char*)&x + 1) = 42;
}
__device__ void out_of_bounds_function()
{
*(int*) 0x87654320 = 42;
}
__global__ void out_of_bounds_kernel()
{
out_of_bounds_function();
}
void run_unaligned(void)
{
printf("Running unaligned_kernel\n");
hipLaunchKernelGGL(( unaligned_kernel), dim3(1),dim3(1), 0, 0, );
printf("Ran unaligned_kernel: %s\n",
hipGetErrorString(hipGetLastError()));
printf("Sync: %s\n", hipGetErrorString(hipDeviceSynchronize()));
}
void run_out_of_bounds(void)
{
printf("Running out_of_bounds_kernel\n");
hipLaunchKernelGGL(( out_of_bounds_kernel), dim3(1),dim3(1), 0, 0, );
printf("Ran out_of_bounds_kernel: %s\n",
hipGetErrorString(hipGetLastError()));
printf("Sync: %s\n", hipGetErrorString(hipDeviceSynchronize()));
}
int main()
{
int *devMem;
printf("Mallocing memory\n");
hipMalloc((void**)&devMem, 1024);
run_unaligned();
run_out_of_bounds();
hipDeviceReset();
hipFree(devMem);
}
| c456290212a73e01239f2ea273f4c72a88319854.cu | #include <stdio.h>
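// Demo that deliberately triggers an unaligned write (unaligned_kernel) and
// an out-of-bounds write (out_of_bounds_kernel); meant to be run under a GPU
// memory checker such as cuda-memcheck / compute-sanitizer.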
__device__ int x;
__global__ void unaligned_kernel()
{
*(int*) ((char*)&x + 1) = 42;
}
__device__ void out_of_bounds_function()
{
*(int*) 0x87654320 = 42;
}
__global__ void out_of_bounds_kernel()
{
out_of_bounds_function();
}
void run_unaligned(void)
{
printf("Running unaligned_kernel\n");
unaligned_kernel<<<1,1>>>();
printf("Ran unaligned_kernel: %s\n",
cudaGetErrorString(cudaGetLastError()));
printf("Sync: %s\n", cudaGetErrorString(cudaThreadSynchronize()));
}
void run_out_of_bounds(void)
{
printf("Running out_of_bounds_kernel\n");
out_of_bounds_kernel<<<1,1>>>();
printf("Ran out_of_bounds_kernel: %s\n",
cudaGetErrorString(cudaGetLastError()));
printf("Sync: %s\n", cudaGetErrorString(cudaThreadSynchronize()));
}
int main()
{
int *devMem;
printf("Mallocing memory\n");
cudaMalloc((void**)&devMem, 1024);
run_unaligned();
run_out_of_bounds();
cudaDeviceReset();
cudaFree(devMem);
}
|
81de99c205b6aaf12202116617866ada183ecb74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
result[index] = a[index] +b[index];
//printf("%f /n",result);
/*
*for(int i = index; i < N; i += stride)
*{
* result[i] = a[i] + b[i];
*}
*/
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
/*
*Our original time with 256 threads per block with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks at the end of Exercise 1 was 149813426 nanoseconds
*/
/*
*1. Changing to one thread per block, but still keeping enough total threads for the whole operation by increasing the number of thread blocks
*The time taken for this was 182202376 nanoseconds
*/
//threadsPerBlock = 1;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*2. Changing to 1024 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 117600895 nanoseconds
*/
//threadsPerBlock = 1024;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*3. Changing to 32 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 157480877 nanoseconds
*/
//threadsPerBlock = 32;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*4. Changing to 128 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 124765401 nanoseconds
*/
//threadsPerBlock = 128;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*5. Changing to 512 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 117036285 nanoseconds
*/
threadsPerBlock = 512;
numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
//It seems the most optimal configuration (out of the ones we tried) was with 512 threads per block
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 81de99c205b6aaf12202116617866ada183ecb74.cu | #include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
result[index] = a[index] +b[index];
//printf("%f /n",result);
/*
*for(int i = index; i < N; i += stride)
*{
* result[i] = a[i] + b[i];
*}
*/
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
/*
*Our original time with 256 threads per block with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks at the end of Exercise 1 was 149813426 nanoseconds
*/
/*
*1. Changing to one thread per block, but still keeping enough total threads for the whole operation by increasing the number of thread blocks
*The time taken for this was 182202376 nanoseconds
*/
//threadsPerBlock = 1;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*2. Changing to 1024 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 117600895 nanoseconds
*/
//threadsPerBlock = 1024;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*3. Changing to 32 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 157480877 nanoseconds
*/
//threadsPerBlock = 32;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*4. Changing to 128 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 124765401 nanoseconds
*/
//threadsPerBlock = 128;
//numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
/*
*5. Changing to 512 threads per block. with ((N + threadsPerBlock - 1) / threadsPerBlock) Blocks
*The time taken for this was 117036285 nanoseconds
*/
threadsPerBlock = 512;
numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
//It seems the most optimal configuration (out of the ones we tried) was with 512 threads per block
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
e6983a4c4d7245fa39152efa18479d2306cae61d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <io/utilities/wrapper_utils.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/wrapper_types.hpp>
#include <utilities/bit_util.cuh>
#include <hip/hip_runtime.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <fstream>
#include <algorithm>
//
// This is called by the write_csv method below.
//
// Parameters:
// - column: The column to be converted.
// - row_offset: Number of entries from the beginning to skip; must be a multiple of 8.
// - rows: Number of rows from the offset that should be converted for this column.
// - delimiter: Separator to append to the column strings
// - null_representation: String to use for null entries
// - true_string: String to use for 'true' values in boolean columns
// - false_string: String to use for 'false' values in boolean columns
// Return: NVStrings instance formatted for CSV column output.
//
NVStrings* column_to_strings_csv(const gdf_column* column, gdf_size_type row_offset, gdf_size_type rows,
const char* delimiter, const char* null_representation,
const char* true_string, const char* false_string )
{
NVStrings* rtn = nullptr;
// point the null bitmask to the next set of bits associated with this chunk of rows
auto valid = column->valid;
if( valid ) // normalize row_offset (number of bits here)
valid += (row_offset / GDF_VALID_BITSIZE); // to appropriate pointer for the bitmask
switch( column->dtype )
{
case GDF_STRING:
rtn = (static_cast<NVStrings*>(column->data))->sublist(row_offset,row_offset+rows);
break;
case GDF_BOOL8:
{
auto d_src = (static_cast<const cudf::bool8*>(column->data)) + row_offset;
device_buffer<bool> bool_buffer(rows);
thrust::transform(
rmm::exec_policy()->on(0), d_src, d_src + rows,
bool_buffer.data(),
[] __device__(const cudf::bool8 value) { return bool{value}; });
rtn = NVStrings::create_from_bools(bool_buffer.data(), rows,
true_string, false_string, valid);
}
break;
case GDF_INT32:
rtn = NVStrings::itos((static_cast<const int32_t*>(column->data))+row_offset,rows,valid);
break;
case GDF_INT64:
rtn = NVStrings::ltos((static_cast<const int64_t*>(column->data))+row_offset,rows,valid);
break;
case GDF_FLOAT32:
rtn = NVStrings::ftos((static_cast<const float*>(column->data))+row_offset,rows,valid);
break;
case GDF_FLOAT64:
rtn = NVStrings::dtos((static_cast<const double*>(column->data))+row_offset,rows,valid);
break;
case GDF_DATE64:
rtn = NVStrings::long2timestamp(static_cast<const uint64_t*>(column->data)+row_offset,rows,
NVStrings::ms,nullptr,valid);
break;
default:
break;
}
CUDF_EXPECTS( rtn != nullptr, "write_csv: unsupported column type");
// replace nulls if specified
if( null_representation )
{
NVStrings* nstr = rtn->fillna(null_representation);
NVStrings::destroy(rtn);
rtn = nstr;
}
// probably could collapse this more
bool bquoted = (column->dtype==GDF_STRING || column->dtype==GDF_DATE64);
// check for delimiters and quotes
bool* bmatches = nullptr;
RMM_TRY( RMM_ALLOC(&bmatches,rows*sizeof(bool),0) );
if( rtn->contains("\"",bmatches) > 0 )
{
NVStrings* esc = rtn->replace("\"","\"\"");
NVStrings::destroy(rtn);
rtn = esc;
}
else if( rtn->contains(",",bmatches) > 0 )
bquoted = true;
RMM_TRY( RMM_FREE( bmatches, 0 ) );
if( bquoted )
{
// prepend and append quotes if needed
NVStrings* pre = rtn->slice_replace("\"",0,0);
NVStrings::destroy(rtn);
rtn = pre->slice_replace("\"",-1,-1);
NVStrings::destroy(pre);
}
// append the delimiter last
if( delimiter && *delimiter )
{
NVStrings* dstr = rtn->slice_replace(delimiter,-1,-1);
NVStrings::destroy(rtn);
rtn = dstr;
}
return rtn;
}
//---------------------------------------------------------------------------
// Creates CSV file from array of gdf_columns.
//
// This will create the CSV format by allocating host memory for the
// entire output and determine pointers for each row/column entry.
// Each column is converted to an NVStrings instance and then
// copied into their position in the output memory. This way,
// one column is processed at a time minimizing device memory usage.
//
//---------------------------------------------------------------------------
gdf_error write_csv(csv_write_arg* args)
{
// when args becomes a struct/class these can be modified
auto columns = args->columns;
unsigned int count = (unsigned int)args->num_cols;
gdf_size_type total_rows = columns[0]->size;
const char* filepath = args->filepath;
char delimiter[2] = {',','\0'};
if( args->delimiter )
delimiter[0] = args->delimiter;
const char* terminator = "\n";
if( args->line_terminator )
terminator = args->line_terminator;
const char* narep = "";
if( args->na_rep )
narep = args->na_rep;
const char* true_value = (args->true_value ? args->true_value : "true");
const char* false_value = (args->false_value ? args->false_value : "false");
bool include_header = args->include_header;
// check for issues here
CUDF_EXPECTS( filepath!=nullptr, "write_csv: filepath not specified" );
CUDF_EXPECTS( count!=0, "write_csv: num_cols is required" );
CUDF_EXPECTS( columns!=0, "write_csv: invalid data values" );
// check all columns are the same size
const bool all_sizes_match = std::all_of( columns, columns+count,
[total_rows] (auto col) {
if( col->dtype==GDF_STRING )
{
NVStrings* strs = (NVStrings*)col->data;
unsigned int elems = strs != nullptr ? strs->size() : 0;
return (total_rows==(gdf_size_type)elems);
}
return (total_rows==col->size);
});
CUDF_EXPECTS( all_sizes_match, "write_csv: columns sizes do not match" );
// check the file can be written
std::ofstream filecsv(filepath,std::ios::out|std::ios::binary|std::ios::trunc);
CUDF_EXPECTS( filecsv.is_open(), "write_csv: file could not be opened");
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
gdf_size_type rows_chunk = (args->rows_per_chunk/8)*8; // must be divisible by 8
CUDF_EXPECTS( rows_chunk>0, "write_csv: invalid chunk_rows; must be at least 8" );
gdf_size_type row_offset = 0;
gdf_size_type rows = total_rows;
while( rows > 0 )
{
if( rows > rows_chunk )
rows = rows_chunk;
//
// Compute string lengths for each string to go into the CSV output.
std::unique_ptr<int[]> pstring_lengths(new int[rows*count]); // matrix of lengths
int* string_lengths = pstring_lengths.get(); // each string length in each row,column
size_t memsize = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
NVStrings* strs = column_to_strings_csv(col,row_offset,rows,delim,narep,true_value,false_value);
memsize += strs->byte_count(string_lengths + (idx*rows),false);
NVStrings::destroy(strs);
}
//
// Example string_lengths matrix for 4 columns and 7 rows
// row-sums
// col0: 1, 1, 2, 11, 12, 7, 7 | 41
// col1: 1, 1, 2, 2, 3, 7, 6 | 22
// col2: 20, 20, 20, 20, 20, 20, 20 | 140
// col3: 5, 6, 4, 6, 4, 4, 5 | 34
// --------------------------------
// col- 27, 28, 28, 39, 39, 38, 38 = 237 (for reference only)
// sums
//
// Need to convert this into the following -- string_locations (below)
// 0, 27, 55, 83, 122, 161, 199
// 1, 28, 57, 94, 134, 168, 206
// 2, 29, 59, 96, 137, 175, 212
// 22, 49, 79, 116, 157, 195, 232
//
// This is essentially an exclusive-scan (prefix-sum) across columns.
// Moving left-to-right, add up each column and carry each value to the next column.
// Looks like we could transpose the matrix, scan it, and then untranspose it.
// Should be able to parallelize the math for this -- will look at prefix-sum algorithms.
//
std::vector<char> buffer(memsize+1);
std::vector<size_t> string_locations(rows*count); // all the memory pointers for each column
string_locations[0] = 0; // first one is always 0
// compute offsets as described above into locations matrix
size_t offset = 0;
for( gdf_size_type jdx=0; jdx < rows; ++jdx )
{
// add up column values for each row
// this is essentially an exclusive-scan across columns
string_locations[jdx] = (size_t)(buffer.data() + offset); // initialize first item
for( unsigned int idx=0; idx < count; ++idx )
{
int* in = string_lengths + (idx*rows);
int len = in[jdx];
offset += (len > 0 ? len:0);
if( (idx+1) < count )
{
size_t* out = string_locations.data() + ((idx+1)*rows);
out[jdx] = (size_t)(buffer.data() + offset);
}
}
}
// now fill in the memory one column at a time
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
NVStrings* strs = column_to_strings_csv(col,row_offset,rows,delim,narep,true_value,false_value);
size_t* colptrs = string_locations.data() + (idx*rows);
// to_host places all the strings into their correct positions in host memory
strs->to_host((char**)colptrs,0,rows);
NVStrings::destroy(strs);
}
//buffer[memsize] = 0; // just so we can printf if needed
// now write buffer to file
// first write the header
if(include_header)
{
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
if( col->col_name )
filecsv << "\"" << col->col_name << "\"";
filecsv << delim;
}
}
// now write the data
filecsv.write(buffer.data(),memsize);
// get ready for the next chunk of rows
row_offset += rows_chunk;
if( row_offset < total_rows )
rows = total_rows - row_offset;
else
rows = 0;
// prevent header for subsequent chunks
include_header = false;
}
filecsv.close();
return gdf_error::GDF_SUCCESS;
}
| e6983a4c4d7245fa39152efa18479d2306cae61d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <io/utilities/wrapper_utils.hpp>
#include <utilities/error_utils.hpp>
#include <utilities/wrapper_types.hpp>
#include <utilities/bit_util.cuh>
#include <cuda_runtime.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <fstream>
#include <algorithm>
//
// This is called by the write_csv method below.
//
// Parameters:
// - column: The column to be converted.
// - row_offset: Number entries from the beginning to skip; must be multiple of 8.
// - rows: Number of rows from the offset that should be converted for this column.
// - delimiter: Separator to append to the column strings
// - null_representation: String to use for null entries
// - true_string: String to use for 'true' values in boolean columns
// - false_string: String to use for 'false' values in boolean columns
// Return: NVStrings instance formated for CSV column output.
//
NVStrings* column_to_strings_csv(const gdf_column* column, gdf_size_type row_offset, gdf_size_type rows,
const char* delimiter, const char* null_representation,
const char* true_string, const char* false_string )
{
NVStrings* rtn = nullptr;
// point the null bitmask to the next set of bits associated with this chunk of rows
auto valid = column->valid;
if( valid ) // normalize row_offset (number of bits here)
valid += (row_offset / GDF_VALID_BITSIZE); // to appropriate pointer for the bitmask
switch( column->dtype )
{
case GDF_STRING:
rtn = (static_cast<NVStrings*>(column->data))->sublist(row_offset,row_offset+rows);
break;
case GDF_BOOL8:
{
auto d_src = (static_cast<const cudf::bool8*>(column->data)) + row_offset;
device_buffer<bool> bool_buffer(rows);
thrust::transform(
rmm::exec_policy()->on(0), d_src, d_src + rows,
bool_buffer.data(),
[] __device__(const cudf::bool8 value) { return bool{value}; });
rtn = NVStrings::create_from_bools(bool_buffer.data(), rows,
true_string, false_string, valid);
}
break;
case GDF_INT32:
rtn = NVStrings::itos((static_cast<const int32_t*>(column->data))+row_offset,rows,valid);
break;
case GDF_INT64:
rtn = NVStrings::ltos((static_cast<const int64_t*>(column->data))+row_offset,rows,valid);
break;
case GDF_FLOAT32:
rtn = NVStrings::ftos((static_cast<const float*>(column->data))+row_offset,rows,valid);
break;
case GDF_FLOAT64:
rtn = NVStrings::dtos((static_cast<const double*>(column->data))+row_offset,rows,valid);
break;
case GDF_DATE64:
rtn = NVStrings::long2timestamp(static_cast<const uint64_t*>(column->data)+row_offset,rows,
NVStrings::ms,nullptr,valid);
break;
default:
break;
}
CUDF_EXPECTS( rtn != nullptr, "write_csv: unsupported column type");
// replace nulls if specified
if( null_representation )
{
NVStrings* nstr = rtn->fillna(null_representation);
NVStrings::destroy(rtn);
rtn = nstr;
}
// probably could collapse this more
bool bquoted = (column->dtype==GDF_STRING || column->dtype==GDF_DATE64);
// check for delimiters and quotes
bool* bmatches = nullptr;
RMM_TRY( RMM_ALLOC(&bmatches,rows*sizeof(bool),0) );
if( rtn->contains("\"",bmatches) > 0 )
{
NVStrings* esc = rtn->replace("\"","\"\"");
NVStrings::destroy(rtn);
rtn = esc;
}
else if( rtn->contains(",",bmatches) > 0 )
bquoted = true;
RMM_TRY( RMM_FREE( bmatches, 0 ) );
if( bquoted )
{
// prepend and append quotes if needed
NVStrings* pre = rtn->slice_replace("\"",0,0);
NVStrings::destroy(rtn);
rtn = pre->slice_replace("\"",-1,-1);
NVStrings::destroy(pre);
}
// append the delimiter last
if( delimiter && *delimiter )
{
NVStrings* dstr = rtn->slice_replace(delimiter,-1,-1);
NVStrings::destroy(rtn);
rtn = dstr;
}
return rtn;
}
//---------------------------------------------------------------------------
// Creates CSV file from array of gdf_columns.
//
// This will create the CSV format by allocating host memory for the
// entire output and determine pointers for each row/column entry.
// Each column is converted to an NVStrings instance and then
// copied into their position in the output memory. This way,
// one column is processed at a time minimizing device memory usage.
//
//---------------------------------------------------------------------------
gdf_error write_csv(csv_write_arg* args)
{
// when args becomes a struct/class these can be modified
auto columns = args->columns;
unsigned int count = (unsigned int)args->num_cols;
gdf_size_type total_rows = columns[0]->size;
const char* filepath = args->filepath;
char delimiter[2] = {',','\0'};
if( args->delimiter )
delimiter[0] = args->delimiter;
const char* terminator = "\n";
if( args->line_terminator )
terminator = args->line_terminator;
const char* narep = "";
if( args->na_rep )
narep = args->na_rep;
const char* true_value = (args->true_value ? args->true_value : "true");
const char* false_value = (args->false_value ? args->false_value : "false");
bool include_header = args->include_header;
// check for issues here
CUDF_EXPECTS( filepath!=nullptr, "write_csv: filepath not specified" );
CUDF_EXPECTS( count!=0, "write_csv: num_cols is required" );
CUDF_EXPECTS( columns!=0, "write_csv: invalid data values" );
// check all columns are the same size
const bool all_sizes_match = std::all_of( columns, columns+count,
[total_rows] (auto col) {
if( col->dtype==GDF_STRING )
{
NVStrings* strs = (NVStrings*)col->data;
unsigned int elems = strs != nullptr ? strs->size() : 0;
return (total_rows==(gdf_size_type)elems);
}
return (total_rows==col->size);
});
CUDF_EXPECTS( all_sizes_match, "write_csv: columns sizes do not match" );
// check the file can be written
std::ofstream filecsv(filepath,std::ios::out|std::ios::binary|std::ios::trunc);
CUDF_EXPECTS( filecsv.is_open(), "write_csv: file could not be opened");
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
gdf_size_type rows_chunk = (args->rows_per_chunk/8)*8; // must be divisible by 8
CUDF_EXPECTS( rows_chunk>0, "write_csv: invalid chunk_rows; must be at least 8" );
gdf_size_type row_offset = 0;
gdf_size_type rows = total_rows;
while( rows > 0 )
{
if( rows > rows_chunk )
rows = rows_chunk;
//
// Compute string lengths for each string to go into the CSV output.
std::unique_ptr<int[]> pstring_lengths(new int[rows*count]); // matrix of lengths
int* string_lengths = pstring_lengths.get(); // each string length in each row,column
size_t memsize = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
NVStrings* strs = column_to_strings_csv(col,row_offset,rows,delim,narep,true_value,false_value);
memsize += strs->byte_count(string_lengths + (idx*rows),false);
NVStrings::destroy(strs);
}
//
// Example string_lengths matrix for 4 columns and 7 rows
// row-sums
// col0: 1, 1, 2, 11, 12, 7, 7 | 41
// col1: 1, 1, 2, 2, 3, 7, 6 | 22
// col2: 20, 20, 20, 20, 20, 20, 20 | 140
// col3: 5, 6, 4, 6, 4, 4, 5 | 34
// --------------------------------
// col- 27, 28, 28, 39, 39, 38, 38 = 237 (for reference only)
// sums
//
// Need to convert this into the following -- string_locations (below)
// 0, 27, 55, 83, 122, 161, 199
// 1, 28, 57, 94, 134, 168, 206
// 2, 29, 59, 96, 137, 175, 212
// 22, 49, 79, 116, 157, 195, 232
//
// This is essentially an exclusive-scan (prefix-sum) across columns.
// Moving left-to-right, add up each column and carry each value to the next column.
// Looks like we could transpose the matrix, scan it, and then untranspose it.
// Should be able to parallelize the math for this -- will look at prefix-sum algorithms.
//
std::vector<char> buffer(memsize+1);
std::vector<size_t> string_locations(rows*count); // all the memory pointers for each column
string_locations[0] = 0; // first one is always 0
// compute offsets as described above into locations matrix
size_t offset = 0;
for( gdf_size_type jdx=0; jdx < rows; ++jdx )
{
// add up column values for each row
// this is essentially an exclusive-scan across columns
string_locations[jdx] = (size_t)(buffer.data() + offset); // initialize first item
for( unsigned int idx=0; idx < count; ++idx )
{
int* in = string_lengths + (idx*rows);
int len = in[jdx];
offset += (len > 0 ? len:0);
if( (idx+1) < count )
{
size_t* out = string_locations.data() + ((idx+1)*rows);
out[jdx] = (size_t)(buffer.data() + offset);
}
}
}
// now fill in the memory one column at a time
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
NVStrings* strs = column_to_strings_csv(col,row_offset,rows,delim,narep,true_value,false_value);
size_t* colptrs = string_locations.data() + (idx*rows);
// to_host places all the strings into their correct positions in host memory
strs->to_host((char**)colptrs,0,rows);
NVStrings::destroy(strs);
}
//buffer[memsize] = 0; // just so we can printf if needed
// now write buffer to file
// first write the header
if(include_header)
{
for( unsigned int idx=0; idx < count; ++idx )
{
const gdf_column* col = columns[idx];
const char* delim = ((idx+1)<count ? delimiter : terminator);
if( col->col_name )
filecsv << "\"" << col->col_name << "\"";
filecsv << delim;
}
}
// now write the data
filecsv.write(buffer.data(),memsize);
// get ready for the next chunk of rows
row_offset += rows_chunk;
if( row_offset < total_rows )
rows = total_rows - row_offset;
else
rows = 0;
// prevent header for subsequent chunks
include_header = false;
}
filecsv.close();
return gdf_error::GDF_SUCCESS;
}
|
272031965b26d6f1d5439fdaaeb49c1723d1881e.hip | // !!! This is a file automatically generated by hipify!!!
/* Bluebird Library - High performance CPUs and GPUs computing library.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Bluebird Library is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Bluebird Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
#include "BB.h"
#include "HExceptions.h"
#include "Hmatrix.h"
#include "Dmatrix.cuh"
#include "Division.cuh"
#include "Expression.cuh"
#include "Promotion.cuh"
#include "Scalar.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define Scalar_Scalar(T1,T2,OpClass,overloaded_operator) template <> typename BB::Promotion<T1,T2>::strongest BB::overloaded_operator(const T1 a,const T2 b) { return BB::OpClass::eval(a,b); }
Promotion<int,int2_>::strongest operator/(const int a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<int,float2_>::strongest operator/(const int a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<int,double2_>::strongest operator/(const int a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<float,int2_>::strongest operator/(const float a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<float,float2_>::strongest operator/(const float a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<float,double2_>::strongest operator/(const float a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<double,int2_>::strongest operator/(const double a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<double,float2_>::strongest operator/(const double a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<double,double2_>::strongest operator/(const double a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,int>::strongest operator/(const int2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<int2_,float>::strongest operator/(const int2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<int2_,double>::strongest operator/(const int2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<int2_,int2_>::strongest operator/(const int2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,float2_>::strongest operator/(const int2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,double2_>::strongest operator/(const int2_ a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,int>::strongest operator/(const float2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<float2_,float>::strongest operator/(const float2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<float2_,double>::strongest operator/(const float2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<float2_,int2_>::strongest operator/(const float2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,float2_>::strongest operator/(const float2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,double2_>::strongest operator/(const float2_ a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,int>::strongest operator/(const double2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<double2_,float>::strongest operator/(const double2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<double2_,double>::strongest operator/(const double2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<double2_,int2_>::strongest operator/(const double2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,float2_>::strongest operator/(const double2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,double2_>::strongest operator/(const double2_ a,const double2_ b) { return BB::Div::eval(a,b); }
#define Matrix_Scalar_Matrix(T1,T2,OpClass,overloaded_operator) template <> \
BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const T2 v2) \
{ \
BB::Scalar<T2> c(v2); \
typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISHOST); \
}
#define Matrix_Scalar_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> \
BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const T2 v2) \
{ \
BB::Scalar<T2> c(v2); \
typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISDEVICE); \
}
#define Scalar_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Hmatrix<T1> &v2) \
{ \
BB::Scalar<T2> c(v1); \
typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISHOST); \
}
#define Scalar_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Dmatrix<T1> &v2) \
{ \
BB::Scalar<T2> c(v1); \
typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISDEVICE); \
}
// Hmatrix-Hmatrix Hmatrix -- TESTED
#define Matrix_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \
BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const BB::Hmatrix<T2> &v2) \
{ \
if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \
{ \
typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISHOST); \
} else { char* str0 = "****************************************************************\n"; \
char* str1 = "* Size mismatch in binary CPU matrix operation (matrix-matrix) *\n"; \
char* str2 = "Left operand size: "; \
char* str3 = "Right operand size: "; \
char* str4 = "Operation: "; \
char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \
sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \
throw BB::GenericError(catString,__FILE__,__LINE__); \
} \
}
// Hmatrix-Hmatrix Dmatrix -- TESTED
#define Matrix_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \
BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const BB::Dmatrix<T2> &v2) \
{ \
if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \
{ \
typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISDEVICE); \
} else { char* str0 = "****************************************************************\n"; \
char* str1 = "* Size mismatch in binary GPU matrix operation (matrix-matrix) *\n"; \
char* str2 = "Left operand size: "; \
char* str3 = "Right operand size: "; \
char* str4 = "Operation: "; \
char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \
sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \
throw BB::GenericError(catString,__FILE__,__LINE__); \
} \
}
Matrix_Matrix_Matrix(T1,T2,Div,operator/)
Matrix_Matrix_CudaMatrix(T1,T2,Div,operator/)
// Hmatrix-Hmatrix Hmatrix -- TESTED
#define Matrix_Matrix_Matrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1>&,const BB::Hmatrix<T2>&);
// Hmatrix-Hmatrix Dmatrix -- TESTED
#define Matrix_Matrix_CudaMatrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1>&,const BB::Dmatrix<T2>&);
Matrix_Scalar_Matrix(int,int,Div,operator/)
Matrix_Scalar_Matrix(int,float,Div,operator/)
Matrix_Scalar_Matrix(int,double,Div,operator/)
Matrix_Scalar_Matrix(int,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(int,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(int,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(float,int,Div,operator/)
Matrix_Scalar_Matrix(float,float,Div,operator/)
Matrix_Scalar_Matrix(float,double,Div,operator/)
Matrix_Scalar_Matrix(float,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(float,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(float,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(double,int,Div,operator/)
Matrix_Scalar_Matrix(double,float,Div,operator/)
Matrix_Scalar_Matrix(double,double,Div,operator/)
Matrix_Scalar_Matrix(double,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(double,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(double,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,int,Div,operator/)
Matrix_Scalar_CudaMatrix(int,float,Div,operator/)
Matrix_Scalar_CudaMatrix(int,double,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,int,Div,operator/)
Matrix_Scalar_CudaMatrix(float,float,Div,operator/)
Matrix_Scalar_CudaMatrix(float,double,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,int,Div,operator/)
Matrix_Scalar_CudaMatrix(double,float,Div,operator/)
Matrix_Scalar_CudaMatrix(double,double,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(int,int,Div,operator/)
Scalar_Matrix_Matrix(int,float,Div,operator/)
Scalar_Matrix_Matrix(int,double,Div,operator/)
Scalar_Matrix_Matrix(int,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(int,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(int,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(float,int,Div,operator/)
Scalar_Matrix_Matrix(float,float,Div,operator/)
Scalar_Matrix_Matrix(float,double,Div,operator/)
Scalar_Matrix_Matrix(float,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(float,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(float,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(double,int,Div,operator/)
Scalar_Matrix_Matrix(double,float,Div,operator/)
Scalar_Matrix_Matrix(double,double,Div,operator/)
Scalar_Matrix_Matrix(double,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(double,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(double,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,int,Div,operator/)
Scalar_Matrix_CudaMatrix(int,float,Div,operator/)
Scalar_Matrix_CudaMatrix(int,double,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,int,Div,operator/)
Scalar_Matrix_CudaMatrix(float,float,Div,operator/)
Scalar_Matrix_CudaMatrix(float,double,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,int,Div,operator/)
Scalar_Matrix_CudaMatrix(double,float,Div,operator/)
Scalar_Matrix_CudaMatrix(double,double,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::double2_,Div,operator/)
| 272031965b26d6f1d5439fdaaeb49c1723d1881e.cu | /* Bluebird Library - High performance CPUs and GPUs computing library.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Bluebird Library is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Bluebird Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
#include "BB.h"
#include "HExceptions.h"
#include "Hmatrix.h"
#include "Dmatrix.cuh"
#include "Division.cuh"
#include "Expression.cuh"
#include "Promotion.cuh"
#include "Scalar.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#define Scalar_Scalar(T1,T2,OpClass,overloaded_operator) template <> typename BB::Promotion<T1,T2>::strongest BB::overloaded_operator(const T1 a,const T2 b) { return BB::OpClass::eval(a,b); }
Promotion<int,int2_>::strongest operator/(const int a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<int,float2_>::strongest operator/(const int a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<int,double2_>::strongest operator/(const int a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<float,int2_>::strongest operator/(const float a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<float,float2_>::strongest operator/(const float a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<float,double2_>::strongest operator/(const float a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<double,int2_>::strongest operator/(const double a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<double,float2_>::strongest operator/(const double a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<double,double2_>::strongest operator/(const double a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,int>::strongest operator/(const int2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<int2_,float>::strongest operator/(const int2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<int2_,double>::strongest operator/(const int2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<int2_,int2_>::strongest operator/(const int2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,float2_>::strongest operator/(const int2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<int2_,double2_>::strongest operator/(const int2_ a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,int>::strongest operator/(const float2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<float2_,float>::strongest operator/(const float2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<float2_,double>::strongest operator/(const float2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<float2_,int2_>::strongest operator/(const float2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,float2_>::strongest operator/(const float2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<float2_,double2_>::strongest operator/(const float2_ a,const double2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,int>::strongest operator/(const double2_ a,const int b) { return BB::Div::eval(a,b); }
Promotion<double2_,float>::strongest operator/(const double2_ a,const float b) { return BB::Div::eval(a,b); }
Promotion<double2_,double>::strongest operator/(const double2_ a,const double b) { return BB::Div::eval(a,b); }
Promotion<double2_,int2_>::strongest operator/(const double2_ a,const int2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,float2_>::strongest operator/(const double2_ a,const float2_ b) { return BB::Div::eval(a,b); }
Promotion<double2_,double2_>::strongest operator/(const double2_ a,const double2_ b) { return BB::Div::eval(a,b); }
#define Matrix_Scalar_Matrix(T1,T2,OpClass,overloaded_operator) template <> \
BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const T2 v2) \
{ \
BB::Scalar<T2> c(v2); \
typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISHOST); \
}
#define Matrix_Scalar_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> \
BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const T2 v2) \
{ \
BB::Scalar<T2> c(v2); \
typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISDEVICE); \
}
#define Scalar_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Hmatrix<T1> &v2) \
{ \
BB::Scalar<T2> c(v1); \
typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISHOST); \
}
#define Scalar_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Dmatrix<T1> &v2) \
{ \
BB::Scalar<T2> c(v1); \
typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISDEVICE); \
}
// Hmatrix-Hmatrix Hmatrix -- TESTED
#define Matrix_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \
BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const BB::Hmatrix<T2> &v2) \
{ \
if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \
{ \
typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISHOST); \
} else { char* str0 = "****************************************************************\n"; \
char* str1 = "* Size mismatch in binary CPU matrix operation (matrix-matrix) *\n"; \
char* str2 = "Left operand size: "; \
char* str3 = "Right operand size: "; \
char* str4 = "Operation: "; \
char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \
sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \
throw BB::GenericError(catString,__FILE__,__LINE__); \
} \
}
// Hmatrix-Hmatrix Dmatrix -- TESTED
#define Matrix_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \
BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const BB::Dmatrix<T2> &v2) \
{ \
if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \
{ \
typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \
return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISDEVICE); \
} else { char* str0 = "****************************************************************\n"; \
char* str1 = "* Size mismatch in binary GPU matrix operation (matrix-matrix) *\n"; \
char* str2 = "Left operand size: "; \
char* str3 = "Right operand size: "; \
char* str4 = "Operation: "; \
char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \
sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \
throw BB::GenericError(catString,__FILE__,__LINE__); \
} \
}
Matrix_Matrix_Matrix(T1,T2,Div,operator/)
Matrix_Matrix_CudaMatrix(T1,T2,Div,operator/)
// Hmatrix-Hmatrix Hmatrix -- TESTED
#define Matrix_Matrix_Matrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1>&,const BB::Hmatrix<T2>&);
// Hmatrix-Hmatrix Dmatrix -- TESTED
#define Matrix_Matrix_CudaMatrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1>&,const BB::Dmatrix<T2>&);
Matrix_Scalar_Matrix(int,int,Div,operator/)
Matrix_Scalar_Matrix(int,float,Div,operator/)
Matrix_Scalar_Matrix(int,double,Div,operator/)
Matrix_Scalar_Matrix(int,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(int,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(int,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(float,int,Div,operator/)
Matrix_Scalar_Matrix(float,float,Div,operator/)
Matrix_Scalar_Matrix(float,double,Div,operator/)
Matrix_Scalar_Matrix(float,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(float,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(float,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(double,int,Div,operator/)
Matrix_Scalar_Matrix(double,float,Div,operator/)
Matrix_Scalar_Matrix(double,double,Div,operator/)
Matrix_Scalar_Matrix(double,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(double,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(double,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::int2_,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::float2_,BB::double2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,int,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,float,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,double,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::int2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::float2_,Div,operator/)
Matrix_Scalar_Matrix(BB::double2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,int,Div,operator/)
Matrix_Scalar_CudaMatrix(int,float,Div,operator/)
Matrix_Scalar_CudaMatrix(int,double,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(int,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,int,Div,operator/)
Matrix_Scalar_CudaMatrix(float,float,Div,operator/)
Matrix_Scalar_CudaMatrix(float,double,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(float,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,int,Div,operator/)
Matrix_Scalar_CudaMatrix(double,float,Div,operator/)
Matrix_Scalar_CudaMatrix(double,double,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(double,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::int2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::float2_,BB::double2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,int,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,float,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,double,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::int2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::float2_,Div,operator/)
Matrix_Scalar_CudaMatrix(BB::double2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(int,int,Div,operator/)
Scalar_Matrix_Matrix(int,float,Div,operator/)
Scalar_Matrix_Matrix(int,double,Div,operator/)
Scalar_Matrix_Matrix(int,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(int,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(int,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(float,int,Div,operator/)
Scalar_Matrix_Matrix(float,float,Div,operator/)
Scalar_Matrix_Matrix(float,double,Div,operator/)
Scalar_Matrix_Matrix(float,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(float,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(float,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(double,int,Div,operator/)
Scalar_Matrix_Matrix(double,float,Div,operator/)
Scalar_Matrix_Matrix(double,double,Div,operator/)
Scalar_Matrix_Matrix(double,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(double,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(double,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::int2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::float2_,BB::double2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,int,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,float,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,double,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::int2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::float2_,Div,operator/)
Scalar_Matrix_Matrix(BB::double2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,int,Div,operator/)
Scalar_Matrix_CudaMatrix(int,float,Div,operator/)
Scalar_Matrix_CudaMatrix(int,double,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(int,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,int,Div,operator/)
Scalar_Matrix_CudaMatrix(float,float,Div,operator/)
Scalar_Matrix_CudaMatrix(float,double,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(float,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,int,Div,operator/)
Scalar_Matrix_CudaMatrix(double,float,Div,operator/)
Scalar_Matrix_CudaMatrix(double,double,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(double,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::int2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::float2_,BB::double2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,int,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,float,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,double,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::int2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::float2_,Div,operator/)
Scalar_Matrix_CudaMatrix(BB::double2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(int,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(float,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(double,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::double2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,int,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,float,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,double,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::int2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::float2_,Div,operator/)
Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(int,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(float,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(double,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::double2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,int,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,float,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,double,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::int2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::float2_,Div,operator/)
Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::double2_,Div,operator/)
|
b6d1ec13e2a347e6dae42e1e2dcb8fe8d929f14f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_PLANE_DIM 16
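// Interior update kernel: applies the radius-4 (8th-order) finite-difference
// Laplacian to u and advances v by one time step. Each thread owns one (j,k)
// column and marches along x, rotating the stencil values through registers
// (behind4..infront4) so each input element is read from global memory once.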
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
float infront1, infront2, infront3, infront4; // variables for input in front of the current slice
float behind1, behind2, behind3, behind4; // variables for input behind the current slice
float current; // input value in the current slice
behind3 = u[IDX3_l(x3-4,j,k)];
behind2 = u[IDX3_l(x3-3,j,k)];
behind1 = u[IDX3_l(x3-2,j,k)];
current = u[IDX3_l(x3-1,j,k)];
infront1 = u[IDX3_l(x3+0,j,k)];
infront2 = u[IDX3_l(x3+1,j,k)];
infront3 = u[IDX3_l(x3+2,j,k)];
infront4 = u[IDX3_l(x3+3,j,k)];
for (llint i = x3; i < x4; i++) {
// advance the slice (move the thread-front)
behind4 = behind3;
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = infront1;
infront1 = infront2;
infront2 = infront3;
infront3 = infront4;
infront4 = u[IDX3_l(i+N_RADIUS,j,k)];
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(infront1,behind1)
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(infront2,behind2)
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(infront3,behind3)
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(infront4,behind4)
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, current,
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
}
}
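// Boundary (PML) update kernel: same radius-4 stencil as the interior kernel,
// but the update of v is damped by the profile eta and the auxiliary field
// phi is advanced as well, absorbing outgoing waves at the domain faces.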
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
float infront1, infront2, infront3, infront4; // variables for input in front of the current slice
float behind1, behind2, behind3, behind4; // variables for input behind the current slice
float current; // input value in the current slice
float infrontEta, behindEta, currentEta;
behind3 = u[IDX3_l(x3-4,j,k)];
behind2 = u[IDX3_l(x3-3,j,k)];
behind1 = u[IDX3_l(x3-2,j,k)];
current = u[IDX3_l(x3-1,j,k)];
infront1 = u[IDX3_l(x3+0,j,k)];
infront2 = u[IDX3_l(x3+1,j,k)];
infront3 = u[IDX3_l(x3+2,j,k)];
infront4 = u[IDX3_l(x3+3,j,k)];
currentEta = eta[IDX3_eta1(x3-1,j,k)];
infrontEta = eta[IDX3_eta1(x3,j,k)];
for (llint i = x3; i < x4; i++) {
// advance the slice (move the thread-front)
behind4 = behind3;
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = infront1;
infront1 = infront2;
infront2 = infront3;
infront3 = infront4;
infront4 = u[IDX3_l(i+N_RADIUS,j,k)];
behindEta = currentEta;
currentEta = infrontEta;
infrontEta = eta[IDX3_eta1(i+1,j,k)];
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(infront1,behind1)
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(infront2,behind2)
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(infront3,behind3)
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(infront4,behind4)
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, currentEta,
__fsub_rn(2.f,
__fmul_rn(currentEta, currentEta)
)
),
current,
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, currentEta, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(infrontEta,behindEta),
__fsub_rn(infront1,behind1)
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
),
hdz_2)
))
)
,
__fadd_rn(1.f, currentEta)
);
}
}
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
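// Host driver: copies u, v, vp, phi and eta to the device, then for each time
// step launches the interior kernel plus six PML face kernels on separate
// streams, injects the source term, and swaps the d_u/d_v pointers.
// Only the kernel region is timed; u is copied back to the host at the end.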
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
hipMalloc(&d_u, sizeof(float) * size_u);
hipMalloc(&d_v, sizeof(float) * size_u);
hipMalloc(&d_vp, sizeof(float) * size_vp);
hipMalloc(&d_phi, sizeof(float) * size_phi);
hipMalloc(&d_eta, sizeof(float) * size_eta);
hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice);
hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice);
hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM, N_THREADS_PER_PLANE_DIM, 1);
int num_streams = 7;
hipStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreateWithFlags(&(streams[i]), hipStreamNonBlocking);
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(ny+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_front), dim3(threadsPerBlock), 0, streams[1], nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y2-y1+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_top), dim3(threadsPerBlock), 0, streams[2], nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_left), dim3(threadsPerBlock), 0, streams[3], nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_center), dim3(threadsPerBlock), 0, streams[0], nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_right), dim3(threadsPerBlock), 0, streams[4], nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y6-y5+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_bottom), dim3(threadsPerBlock), 0, streams[5], nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(ny+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_back), dim3(threadsPerBlock), 0, streams[6], nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
hipStreamSynchronize(streams[i]);
}
hipLaunchKernelGGL(( kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(streams[i]);
}
hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_v);
hipFree(d_vp);
hipFree(d_phi);
hipFree(d_eta);
}
| b6d1ec13e2a347e6dae42e1e2dcb8fe8d929f14f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_PLANE_DIM 16
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
float infront1, infront2, infront3, infront4; // variables for input “in front of” the current slice
float behind1, behind2, behind3, behind4; // variables for input “behind” the current slice
float current; // input value in the current slice
behind3 = u[IDX3_l(x3-4,j,k)];
behind2 = u[IDX3_l(x3-3,j,k)];
behind1 = u[IDX3_l(x3-2,j,k)];
current = u[IDX3_l(x3-1,j,k)];
infront1 = u[IDX3_l(x3+0,j,k)];
infront2 = u[IDX3_l(x3+1,j,k)];
infront3 = u[IDX3_l(x3+2,j,k)];
infront4 = u[IDX3_l(x3+3,j,k)];
for (llint i = x3; i < x4; i++) {
// advance the slice (move the thread-front)
behind4 = behind3;
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = infront1;
infront1 = infront2;
infront2 = infront3;
infront3 = infront4;
infront4 = u[IDX3_l(i+N_RADIUS,j,k)];
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(infront1,behind1)
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(infront2,behind2)
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(infront3,behind3)
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(infront4,behind4)
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, current,
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
}
}
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
float infront1, infront2, infront3, infront4; // variables for input “in front of” the current slice
float behind1, behind2, behind3, behind4; // variables for input “behind” the current slice
float current; // input value in the current slice
float infrontEta, behindEta, currentEta;
behind3 = u[IDX3_l(x3-4,j,k)];
behind2 = u[IDX3_l(x3-3,j,k)];
behind1 = u[IDX3_l(x3-2,j,k)];
current = u[IDX3_l(x3-1,j,k)];
infront1 = u[IDX3_l(x3+0,j,k)];
infront2 = u[IDX3_l(x3+1,j,k)];
infront3 = u[IDX3_l(x3+2,j,k)];
infront4 = u[IDX3_l(x3+3,j,k)];
currentEta = eta[IDX3_eta1(x3-1,j,k)];
infrontEta = eta[IDX3_eta1(x3,j,k)];
for (llint i = x3; i < x4; i++) {
// advance the slice (move the thread-front)
behind4 = behind3;
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = infront1;
infront1 = infront2;
infront2 = infront3;
infront3 = infront4;
infront4 = u[IDX3_l(i+N_RADIUS,j,k)];
behindEta = currentEta;
currentEta = infrontEta;
infrontEta = eta[IDX3_eta1(i+1,j,k)];
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(infront1,behind1)
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(infront2,behind2)
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(infront3,behind3)
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(infront4,behind4)
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, currentEta,
__fsub_rn(2.f,
__fmul_rn(currentEta, currentEta)
)
),
current,
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, currentEta, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(infrontEta,behindEta),
__fsub_rn(infront1,behind1)
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
),
hdz_2)
))
)
,
__fadd_rn(1.f, currentEta)
);
}
}
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
cudaMalloc(&d_u, sizeof(float) * size_u);
cudaMalloc(&d_v, sizeof(float) * size_u);
cudaMalloc(&d_vp, sizeof(float) * size_vp);
cudaMalloc(&d_phi, sizeof(float) * size_phi);
cudaMalloc(&d_eta, sizeof(float) * size_eta);
cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice);
cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice);
cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice);
cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM, N_THREADS_PER_PLANE_DIM, 1);
int num_streams = 7;
cudaStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreateWithFlags(&(streams[i]), cudaStreamNonBlocking);
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(ny+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
target_pml_3d_kernel<<<n_block_front, threadsPerBlock, 0, streams[1]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y2-y1+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
target_pml_3d_kernel<<<n_block_top, threadsPerBlock, 0, streams[2]>>>(nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
target_pml_3d_kernel<<<n_block_left, threadsPerBlock, 0, streams[3]>>>(nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM);
target_inner_3d_kernel<<<n_block_center, threadsPerBlock, 0, streams[0]>>>(nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y4-y3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
target_pml_3d_kernel<<<n_block_right, threadsPerBlock, 0, streams[4]>>>(nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(y6-y5+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
target_pml_3d_kernel<<<n_block_bottom, threadsPerBlock, 0, streams[5]>>>(nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
(ny+N_THREADS_PER_PLANE_DIM-1) / N_THREADS_PER_PLANE_DIM,
1);
target_pml_3d_kernel<<<n_block_back, threadsPerBlock, 0, streams[6]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
cudaStreamSynchronize(streams[i]);
}
kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(streams[i]);
}
cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_v);
cudaFree(d_vp);
cudaFree(d_phi);
cudaFree(d_eta);
}
|
76dc865dc8f012ba167c2db6e36bbbac3d5208b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "measuring.h"
#include <hip/hip_runtime.h>
#define CUPRINTF(fmt, ...) printf("[%d, %d , %d]:\t" fmt, \
blockIdx.x*gridDim.x+threadIdx.x,\
blockIdx.y*gridDim.y+threadIdx.y,\
blockIdx.z*gridDim.z+threadIdx.z,\
__VA_ARGS__)
#include "../../shared_test.cu"
void device_memory_allocation(double** DevBuffer, int buffer_size)
{
hipMalloc((void**) DevBuffer, buffer_size * sizeof(double));
}
void device_memory_free(double* DevBuffer)
{
hipFree(DevBuffer);
}
// Accelerator initialization
// The computation runs on the accelerator whose number equals
// the number of the launching processor
void device_initialization(int rank)
{
// Assume there are at least as many cores per node as accelerators
int device = rank % GPU_PER_NODE;
hipSetDevice(device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, device);
if (devProp.major < 2)
{
printf("\nError! Compute capability < 2, rank=%d\n", rank);
}
/*
if (!rank)
{
printf("Name : %s\n", devProp.name);
printf("Compute capability : %d.%d\n", devProp.major, devProp.minor);
printf("Total Global Memory : %ld\n", devProp.totalGlobalMem);
printf("Shared memory per block: %d\n", devProp.sharedMemPerBlock);
printf("Registers per block : %d\n", devProp.regsPerBlock);
printf("Warp size : %d\n", devProp.warpSize);
printf("Max threads per block : %d\n", devProp.maxThreadsPerBlock);
printf("Total constant memory : %d\n", devProp.totalConstMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
//printf("Kernel execution timeout: %s\n\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
for (int i = 0; i < 3; ++i)
{
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
}
for (int i = 0; i < 3; ++i)
{
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
}
}*/
fflush(stdout);
// Initialize the cuPrintf library for printing text to the console
// directly from a kernel
cudaPrintfInit();
}
// Accelerator finalization
void device_finalization(void)
{
// Shut down the cuPrintf library used for printing text to the console
// directly from a kernel
cudaPrintfEnd();
}
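// Exchange-buffer kernels: each thread mirrors one buffer element
// (element i takes the value of element buffer_size - i - 1); the load_
// variant runs the mirror before copying the buffer to the host, the save_
// variant runs it after copying the buffer to the device.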
__global__ void load_exchange_data_part_kernel(double* DevBuffer, int buffer_size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < buffer_size)
{
DevBuffer[i] = DevBuffer[buffer_size - i - 1];
}
}
void load_exchange_data_part(double* HostBuffer, double* DevBuffer, int buffer_size)
{
hipLaunchKernelGGL(( load_exchange_data_part_kernel) , dim3(dim3(buffer_size / BlockNX, 1)), dim3(dim3(BlockNX, 1)), 0, 0, DevBuffer, buffer_size);
checkErrors("load_exchange_data_part", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
hipMemcpy(HostBuffer, DevBuffer, buffer_size * sizeof(double), hipMemcpyDeviceToHost);
checkErrors("copy data to host", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
__global__ void save_exchange_data_part_kernel(double* DevBuffer, int buffer_size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < buffer_size)
{
DevBuffer[i] = DevBuffer[buffer_size - i - 1];
}
}
void save_exchange_data_part(double* HostBuffer, double* DevBuffer, int buffer_size)
{
hipMemcpy(DevBuffer, HostBuffer, buffer_size * sizeof(double), hipMemcpyHostToDevice);
checkErrors("copy data to device", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
hipLaunchKernelGGL(( save_exchange_data_part_kernel) , dim3(dim3(buffer_size / BlockNX, 1)), dim3(dim3(BlockNX, 1)), 0, 0, DevBuffer, buffer_size);
checkErrors("save_exchange_data_part", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
__global__ void set_devbuffer_values_kernel(double *DevBuffer, int buffer_size)
{
for (int i = 0; i < buffer_size; i++)
{
DevBuffer[i] = buffer_size / i;
}
}
void set_devbuffer_values(double *DevBuffer, int buffer_size)
{
hipLaunchKernelGGL(( set_devbuffer_values_kernel) , dim3(dim3(buffer_size / BlockNX, 1)), dim3(dim3(BlockNX, 1)), 0, 0, DevBuffer, buffer_size);
checkErrors("set_devbuffer_values", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
| 76dc865dc8f012ba167c2db6e36bbbac3d5208b6.cu | #include "measuring.h"
#include <cuda.h>
#define CUPRINTF(fmt, ...) printf("[%d, %d , %d]:\t" fmt, \
blockIdx.x*gridDim.x+threadIdx.x,\
blockIdx.y*gridDim.y+threadIdx.y,\
blockIdx.z*gridDim.z+threadIdx.z,\
__VA_ARGS__)
#include "../../shared_test.cu"
void device_memory_allocation(double** DevBuffer, int buffer_size)
{
cudaMalloc((void**) DevBuffer, buffer_size * sizeof(double));
}
void device_memory_free(double* DevBuffer)
{
cudaFree(DevBuffer);
}
// Accelerator initialization
// The computation runs on the accelerator whose number equals
// the number of the launching processor
void device_initialization(int rank)
{
// Assume there are at least as many cores per node as accelerators
int device = rank % GPU_PER_NODE;
cudaSetDevice(device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, device);
if (devProp.major < 2)
{
printf("\nError! Compute capability < 2, rank=%d\n", rank);
}
/*
if (!rank)
{
printf("Name : %s\n", devProp.name);
printf("Compute capability : %d.%d\n", devProp.major, devProp.minor);
printf("Total Global Memory : %ld\n", devProp.totalGlobalMem);
printf("Shared memory per block: %d\n", devProp.sharedMemPerBlock);
printf("Registers per block : %d\n", devProp.regsPerBlock);
printf("Warp size : %d\n", devProp.warpSize);
printf("Max threads per block : %d\n", devProp.maxThreadsPerBlock);
printf("Total constant memory : %d\n", devProp.totalConstMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
//printf("Kernel execution timeout: %s\n\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
for (int i = 0; i < 3; ++i)
{
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
}
for (int i = 0; i < 3; ++i)
{
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
}
}*/
fflush(stdout);
// Initialize the cuPrintf library for printing text to the console
// directly from a kernel
cudaPrintfInit();
}
// Accelerator finalization
void device_finalization(void)
{
// Shut down the cuPrintf library used for printing text to the console
// directly from a kernel
cudaPrintfEnd();
}
__global__ void load_exchange_data_part_kernel(double* DevBuffer, int buffer_size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < buffer_size)
{
DevBuffer[i] = DevBuffer[buffer_size - i - 1];
}
}
void load_exchange_data_part(double* HostBuffer, double* DevBuffer, int buffer_size)
{
load_exchange_data_part_kernel <<< dim3(buffer_size / BlockNX, 1), dim3(BlockNX, 1)>>>(DevBuffer, buffer_size);
checkErrors("load_exchange_data_part", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
cudaMemcpy(HostBuffer, DevBuffer, buffer_size * sizeof(double), cudaMemcpyDeviceToHost);
checkErrors("copy data to host", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
__global__ void save_exchange_data_part_kernel(double* DevBuffer, int buffer_size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < buffer_size)
{
DevBuffer[i] = DevBuffer[buffer_size - i - 1];
}
}
void save_exchange_data_part(double* HostBuffer, double* DevBuffer, int buffer_size)
{
cudaMemcpy(DevBuffer, HostBuffer, buffer_size * sizeof(double), cudaMemcpyHostToDevice);
checkErrors("copy data to device", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
save_exchange_data_part_kernel <<< dim3(buffer_size / BlockNX, 1), dim3(BlockNX, 1)>>>(DevBuffer, buffer_size);
checkErrors("save_exchange_data_part", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
__global__ void set_devbuffer_values_kernel(double *DevBuffer, int buffer_size)
{
for (int i = 0; i < buffer_size; i++)
{
DevBuffer[i] = buffer_size / i;
}
}
void set_devbuffer_values(double *DevBuffer, int buffer_size)
{
set_devbuffer_values_kernel <<< dim3(buffer_size / BlockNX, 1), dim3(BlockNX, 1)>>>(DevBuffer, buffer_size);
checkErrors("set_devbuffer_values", __FILE__, __LINE__);
cudaPrintfDisplay(stdout, true);
}
|
6fefda2cb8fee01fdc9d35a073dbf5dd2ae26e9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void vector_add(long int *a, long int *b, long int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2*2048*2048*2)
#define THREADS_PER_BLOCK 512
int main()
{
long int *a, *b, *c;
long int *d_a, *d_b, *d_c;
long int size = N * sizeof(long int );
printf("Value of N=%d \n", N);
/* allocate space for device copies of a, b, c */
printf("Memory allocation for GPU device\n");
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
printf("Memory allocation for CPU\n");
a = (long int *)malloc( size );
b = (long int *)malloc( size );
c = (long int *)malloc( size );
printf("Defining the numbers\n");
for( long int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
hipLaunchKernelGGL(( vector_add), dim3((N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| 6fefda2cb8fee01fdc9d35a073dbf5dd2ae26e9b.cu | #include <stdio.h>
__global__ void vector_add(long int *a, long int *b, long int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2*2048*2048*2)
#define THREADS_PER_BLOCK 512
int main()
{
long int *a, *b, *c;
long int *d_a, *d_b, *d_c;
long int size = N * sizeof(long int );
printf("Value of N=%d \n", N);
/* allocate space for device copies of a, b, c */
printf("Memory allocation for GPU device\n");
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
printf("Memory allocation for CPU\n");
a = (long int *)malloc( size );
b = (long int *)malloc( size );
c = (long int *)malloc( size );
printf("Defining the numbers\n");
for( long int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
vector_add<<<(N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
c02b15f114b35a9b72556409a7f732dd665f8451.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <sys/time.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
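//Bottom-level scan: each threadblock loads 4 * THREADBLOCK_SIZE elements as
//uint4 vectors and writes their exclusive prefix sum, local to that block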
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
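//Third scan level: exclusive scan of the top elements of the second-level
//scans, used when the array spans more than 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE elements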
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
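//diff_kernel: output element i is d_Src[(i+1)*pnum] - d_Src[i*pnum]; the last
//element is size - d_Src[(length-1)*pnum]. Each thread produces LOOP_PERTHREAD
//consecutive outputs.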
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
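//transport_kernel: single-thread copy of d_Src[loc-1] into d_Data[0]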
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint));
checkCudaErrors(hipMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(hipFree(d_Buf));
checkCudaErrors(hipFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
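//Three-level scan for very large arrays (up to 4 * THREADBLOCK_SIZE^3
//elements): per-block scans, a scan of the block totals, and a scan of the
//totals of those scans, followed by two uniform-add passes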
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//Now the per-block prefix sums (4 * THREADBLOCK_SIZE elements each) are done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//Prefix sum of the per-block last elements is done.
//That scan can only cover THREADBLOCK_SIZE elements at a time,
//so we need one more prefix-sum pass over its own last elements.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared3), dim3(blockCount3), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//add d_Buf to each array of d_Dst
hipLaunchKernelGGL(( uniformUpdate), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( uniformUpdate2), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( diff_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
//transport input data to output per diff
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = 1;//iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( transport_kernel), dim3(blockCount), dim3(1), 0, 0,
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
}
| c02b15f114b35a9b72556409a7f732dd665f8451.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <sys/time.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
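//Exclusive scan of top elements of the second-level scans (one per 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE chunk)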
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
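//Same as uniformUpdate, but each group of THREADBLOCK_SIZE threadblocks shares one buffer entry (top-level offsets)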
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
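//Compute differences between every pnum-th element of d_Src; the last output entry is completed with 'size'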
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
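//Copy the single element d_Src[loc - 1] to d_Data[0]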
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE
//due to the three-level scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
checkCudaErrors(cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(cudaMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(cudaFree(d_Buf));
checkCudaErrors(cudaFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
uniformUpdate<<<(arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//Now the prefix sum within each block of 4 * THREADBLOCK_SIZE elements is done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//The prefix sum of the last elements of each block is done,
//but this scan can only handle up to THREADBLOCK_SIZE elements at once,
//so we need one more prefix sum over the last elements of those scans.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
scanExclusiveShared3<<< blockCount3, THREADBLOCK_SIZE>>>(
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//add d_Buf to each array of d_Dst
uniformUpdate<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(cudaDeviceSynchronize());
uniformUpdate2<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
diff_kernel<<<blockCount, THREADBLOCK_SIZE>>>(
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
//Copy one element of the input, selected by loc, to the output
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = 1;//iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
transport_kernel<<<blockCount, 1>>>(
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
}
|
3da90d917755ee4a260b11244571eeeb67d4ac9f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Implements interlaced NV12 frame batch resize
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "resize_convert.h"
__global__ static void resizeNV12BatchKernel(hipTextureObject_t texSrcLuma,
hipTextureObject_t texSrcChroma,
uint8_t *pDstNv12, int nSrcWidth,
int nSrcHeight, int nDstPitch,
int nDstWidth, int nDstHeight,
int nBatchSize) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int px = x * 2, py = y * 2;
if ((px + 1) >= nDstWidth || (py + 1) >= nDstHeight) return;
float fxScale = 1.0f * nSrcWidth / nDstWidth;
float fyScale = 1.0f * nSrcHeight / nDstHeight;
uint8_t *p = pDstNv12 + px + py * nDstPitch;
int hh = nDstHeight * 3 / 2;
int nByte = nDstPitch * hh;
int px_fxScale = px * fxScale;
int px_fxScale_1 = (px + 1) * fxScale;
int py_fyScale = py * fyScale;
int py_fyScale_1 = (py + 1) * fyScale;
for (int i = blockIdx.z; i < nBatchSize; i+=gridDim.z) {
*(uchar2 *)p = make_uchar2(tex2D<uint8_t>(texSrcLuma, px_fxScale, py_fyScale),
tex2D<uint8_t>(texSrcLuma, px_fxScale_1, py_fyScale));
*(uchar2 *)(p + nDstPitch) =
make_uchar2(tex2D<uint8_t>(texSrcLuma, px_fxScale, py_fyScale_1),
tex2D<uint8_t>(texSrcLuma, px_fxScale_1, py_fyScale_1));
*(uchar2 *)(p + (nDstHeight - y) * nDstPitch) = tex2D<uchar2>(
texSrcChroma, x * fxScale, (hh * i + nDstHeight + y) * fyScale);
p += nByte;
py += hh;
}
}
void resizeNV12Batch(uint8_t *dpSrc, int nSrcPitch, int nSrcWidth,
int nSrcHeight, uint8_t *dpDst, int nDstPitch,
int nDstWidth, int nDstHeight, int nBatchSize,
hipStream_t stream) {
int hhSrc = ceilf(nSrcHeight * 3.0f / 2.0f);
hipResourceDesc resDesc = {};
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = hipCreateChannelDesc<uint8_t>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = hhSrc * nBatchSize;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
hipTextureDesc texDesc = {};
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
hipTextureObject_t texLuma = 0;
checkCudaErrors(hipCreateTextureObject(&texLuma, &resDesc, &texDesc, NULL));
resDesc.res.pitch2D.desc = hipCreateChannelDesc<uchar2>();
resDesc.res.pitch2D.width /= 2;
hipTextureObject_t texChroma = 0;
checkCudaErrors(hipCreateTextureObject(&texChroma, &resDesc, &texDesc, NULL));
dim3 block(32, 32, 1);
size_t blockDimZ = nBatchSize;
  // Restrict blocks in the Z-dim to 32 to avoid launching too many blocks
blockDimZ = (blockDimZ > 32) ? 32 : blockDimZ;
dim3 grid((nDstWidth / 2 + block.x) / block.x,
(nDstHeight / 2 + block.y) / block.y, blockDimZ);
hipLaunchKernelGGL(( resizeNV12BatchKernel), dim3(grid), dim3(block), 0, stream,
texLuma, texChroma, dpDst, nSrcWidth, nSrcHeight, nDstPitch, nDstWidth,
nDstHeight, nBatchSize);
checkCudaErrors(hipDestroyTextureObject(texLuma));
checkCudaErrors(hipDestroyTextureObject(texChroma));
}
| 3da90d917755ee4a260b11244571eeeb67d4ac9f.cu | /*
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Implements interlaced NV12 frame batch resize
#include <cuda.h>
#include <cuda_runtime.h>
#include "resize_convert.h"
__global__ static void resizeNV12BatchKernel(cudaTextureObject_t texSrcLuma,
cudaTextureObject_t texSrcChroma,
uint8_t *pDstNv12, int nSrcWidth,
int nSrcHeight, int nDstPitch,
int nDstWidth, int nDstHeight,
int nBatchSize) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int px = x * 2, py = y * 2;
if ((px + 1) >= nDstWidth || (py + 1) >= nDstHeight) return;
float fxScale = 1.0f * nSrcWidth / nDstWidth;
float fyScale = 1.0f * nSrcHeight / nDstHeight;
uint8_t *p = pDstNv12 + px + py * nDstPitch;
int hh = nDstHeight * 3 / 2;
int nByte = nDstPitch * hh;
int px_fxScale = px * fxScale;
int px_fxScale_1 = (px + 1) * fxScale;
int py_fyScale = py * fyScale;
int py_fyScale_1 = (py + 1) * fyScale;
for (int i = blockIdx.z; i < nBatchSize; i+=gridDim.z) {
*(uchar2 *)p = make_uchar2(tex2D<uint8_t>(texSrcLuma, px_fxScale, py_fyScale),
tex2D<uint8_t>(texSrcLuma, px_fxScale_1, py_fyScale));
*(uchar2 *)(p + nDstPitch) =
make_uchar2(tex2D<uint8_t>(texSrcLuma, px_fxScale, py_fyScale_1),
tex2D<uint8_t>(texSrcLuma, px_fxScale_1, py_fyScale_1));
*(uchar2 *)(p + (nDstHeight - y) * nDstPitch) = tex2D<uchar2>(
texSrcChroma, x * fxScale, (hh * i + nDstHeight + y) * fyScale);
p += nByte;
py += hh;
}
}
void resizeNV12Batch(uint8_t *dpSrc, int nSrcPitch, int nSrcWidth,
int nSrcHeight, uint8_t *dpDst, int nDstPitch,
int nDstWidth, int nDstHeight, int nBatchSize,
cudaStream_t stream) {
int hhSrc = ceilf(nSrcHeight * 3.0f / 2.0f);
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = dpSrc;
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<uint8_t>();
resDesc.res.pitch2D.width = nSrcWidth;
resDesc.res.pitch2D.height = hhSrc * nBatchSize;
resDesc.res.pitch2D.pitchInBytes = nSrcPitch;
cudaTextureDesc texDesc = {};
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t texLuma = 0;
checkCudaErrors(cudaCreateTextureObject(&texLuma, &resDesc, &texDesc, NULL));
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<uchar2>();
resDesc.res.pitch2D.width /= 2;
cudaTextureObject_t texChroma = 0;
checkCudaErrors(cudaCreateTextureObject(&texChroma, &resDesc, &texDesc, NULL));
dim3 block(32, 32, 1);
size_t blockDimZ = nBatchSize;
  // Restrict blocks in the Z-dim to 32 to avoid launching too many blocks
blockDimZ = (blockDimZ > 32) ? 32 : blockDimZ;
dim3 grid((nDstWidth / 2 + block.x) / block.x,
(nDstHeight / 2 + block.y) / block.y, blockDimZ);
resizeNV12BatchKernel<<<grid, block, 0, stream>>>(
texLuma, texChroma, dpDst, nSrcWidth, nSrcHeight, nDstPitch, nDstWidth,
nDstHeight, nBatchSize);
checkCudaErrors(cudaDestroyTextureObject(texLuma));
checkCudaErrors(cudaDestroyTextureObject(texChroma));
}
|