hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
0f749c217fa328582f2ffdfdda5b840da1ed09bf.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on multiple GPUs using
* Multi Device Cooperative Groups. It also uses Unified Memory, optimized with
* prefetching and usage hints.
*
*/
// includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <map>
#include <iostream>
#include <set>
#include <utility>
#include <hip/hip_runtime.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;
const char *sSDKname = "conjugateGradientMultiDeviceCG";
#define ENABLE_CPU_DEBUG_CODE 0
#define THREADS_PER_BLOCK 512
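// Per-device accumulator for the dot product: each block adds its partial sum here, and
// thread 0 of each grid folds the total into the managed-memory dot_result and resets it.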
__device__ double grid_dot_result = 0.0;
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz) {
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)rand() / RAND_MAX + 10.0f;
val[1] = (float)rand() / RAND_MAX;
int start;
for (int i = 1; i < N; i++) {
if (i > 1) {
I[i] = I[i - 1] + 3;
} else {
I[1] = 2;
}
start = (i - 1) * 3 + 2;
J[start] = i - 1;
J[start + 1] = i;
if (i < N - 1) {
J[start + 2] = i + 1;
}
val[start] = val[start - 1];
val[start + 1] = (float)rand() / RAND_MAX + 10.0f;
if (i < N - 1) {
val[start + 2] = (float)rand() / RAND_MAX;
}
}
I[N] = nz;
}
// I - CSR row pointers: index into J/val of the first non-zero element of each row
// J - column index of each non-zero element
// val - values of the non-zero elements of the matrix
// inputVecX - input vector to be multiplied
// outputVecY - resultant vector
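// Illustrative CSR example (not part of the original sample): for the 3x3 tridiagonal
// matrix [[2,1,0],[1,2,1],[0,1,2]] the arrays would be
//   I   = {0, 2, 5, 7}
//   J   = {0, 1, 0, 1, 2, 1, 2}
//   val = {2, 1, 1, 2, 1, 1, 2}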
void cpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha,
float *inputVecX, float *outputVecY) {
for (int i = 0; i < num_rows; i++) {
int num_elems_this_row = I[i + 1] - I[i];
float output = 0.0;
for (int j = 0; j < num_elems_this_row; j++) {
output += alpha * val[I[i] + j] * inputVecX[J[I[i] + j]];
}
outputVecY[i] = output;
}
return;
}
float dotProduct(float *vecA, float *vecB, int size) {
float result = 0.0;
for (int i = 0; i < size; i++) {
result = result + (vecA[i] * vecB[i]);
}
return result;
}
void scaleVector(float *vec, float alpha, int size) {
for (int i = 0; i < size; i++) {
vec[i] = alpha * vec[i];
}
}
void saxpy(float *x, float *y, float a, int size) {
for (int i = 0; i < size; i++) {
y[i] = a * x[i] + y[i];
}
}
void cpuConjugateGrad(int *I, int *J, float *val, float *x, float *Ax, float *p,
float *r, int nnz, int N, float tol) {
int max_iter = 10000;
float alpha = 1.0;
float alpham1 = -1.0;
float r0 = 0.0, b, a, na;
cpuSpMV(I, J, val, nnz, N, alpha, x, Ax);
saxpy(Ax, r, alpham1, N);
float r1 = dotProduct(r, r, N);
int k = 1;
while (r1 > tol * tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
scaleVector(p, b, N);
saxpy(r, p, alpha, N);
} else {
for (int i = 0; i < N; i++) p[i] = r[i];
}
cpuSpMV(I, J, val, nnz, N, alpha, p, Ax);
float dot = dotProduct(p, Ax, N);
a = r1 / dot;
saxpy(p, x, a, N);
na = -a;
saxpy(Ax, r, na, N);
r0 = r1;
r1 = dotProduct(r, r, N);
printf("\nCPU code iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
}
__device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows,
float alpha, float *inputVecX, float *outputVecY,
cg::thread_block &cta,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < num_rows; i += multi_grid.size()) {
int row_elem = I[i];
int next_row_elem = I[i + 1];
int num_elems_this_row = next_row_elem - row_elem;
float output = 0.0;
for (int j = 0; j < num_elems_this_row; j++) {
output += alpha * val[row_elem + j] * inputVecX[J[row_elem + j]];
}
outputVecY[i] = output;
}
}
__device__ void gpuSaxpy(float *x, float *y, float a, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
y[i] = a * x[i] + y[i];
}
}
__device__ void gpuDotProduct(float *vecA, float *vecB, int size,
const cg::thread_block &cta,
const cg::multi_grid_group &multi_grid) {
extern __shared__ double tmp[];
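// One shared-memory slot per 32-thread tile: each tile's leader stores its partial sum,
// then the first tile reduces those partials and adds the block total to grid_dot_result.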
double temp_sum = 0.0;
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
temp_sum += (double)(vecA[i] * vecB[i]);
}
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>());
if (tile32.thread_rank() == 0) {
tmp[tile32.meta_group_rank()] = temp_sum;
}
cg::sync(cta);
if (tile32.meta_group_rank() == 0) {
temp_sum = tile32.thread_rank() < tile32.meta_group_size() ? tmp[tile32.thread_rank()] : 0.0;
temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>());
if (tile32.thread_rank() == 0) {
atomicAdd(&grid_dot_result, temp_sum);
}
}
}
__device__ void gpuCopyVector(float *srcA, float *destB, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
destB[i] = srcA[i];
}
}
__device__ void gpuScaleVectorAndSaxpy(float *x, float *y, float a, float scale, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
y[i] = a * x[i] + scale * y[i];
}
}
extern "C" __global__ void multiGpuConjugateGradient(
int *I, int *J, float *val, float *x, float *Ax, float *p, float *r,
double *dot_result, int nnz, int N, float tol) {
cg::thread_block cta = cg::this_thread_block();
cg::grid_group grid = cg::this_grid();
cg::multi_grid_group multi_grid = cg::this_multi_grid();
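// cg::sync(grid) synchronizes all blocks on this device; cg::sync(multi_grid) synchronizes
// all participating devices. dot_result lives in managed memory and is shared across devices.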
const int max_iter = 10000;
float alpha = 1.0;
float alpham1 = -1.0;
float r0 = 0.0, r1, b, a, na;
for (int i = multi_grid.thread_rank(); i < N; i += multi_grid.size()) {
r[i] = 1.0;
x[i] = 0.0;
}
cg::sync(grid);
gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, cta, multi_grid);
cg::sync(grid);
gpuSaxpy(Ax, r, alpham1, N, multi_grid);
cg::sync(grid);
gpuDotProduct(r, r, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
r1 = *dot_result;
int k = 1;
while (r1 > tol * tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
gpuScaleVectorAndSaxpy(r, p, alpha, b, N, multi_grid);
} else {
gpuCopyVector(r, p, N, multi_grid);
}
cg::sync(multi_grid);
gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, cta, multi_grid);
if (multi_grid.thread_rank() == 0) {
*dot_result = 0.0;
}
cg::sync(multi_grid);
gpuDotProduct(p, Ax, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
a = r1 / *dot_result;
gpuSaxpy(p, x, a, N, multi_grid);
na = -a;
gpuSaxpy(Ax, r, na, N, multi_grid);
r0 = r1;
cg::sync(multi_grid);
if (multi_grid.thread_rank() == 0) {
*dot_result = 0.0;
}
cg::sync(multi_grid);
gpuDotProduct(r, r, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
r1 = *dot_result;
k++;
}
}
// Returns a multimap from compute capability (major, minor) to device ordinal, keeping only
// devices that support cooperativeMultiDeviceLaunch and concurrentManagedAccess.
std::multimap<std::pair<int, int>, int> getIdenticalGPUs() {
int numGpus = 0;
checkCudaErrors(hipGetDeviceCount(&numGpus));
std::multimap<std::pair<int, int>, int> identicalGpus;
for (int i = 0; i < numGpus; i++) {
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, i));
// Filter unsupported devices
if (deviceProp.cooperativeMultiDeviceLaunch &&
deviceProp.concurrentManagedAccess) {
identicalGpus.emplace(std::make_pair(deviceProp.major, deviceProp.minor), i);
}
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", i,
deviceProp.name, deviceProp.major, deviceProp.minor);
}
return identicalGpus;
}
int main(int argc, char **argv) {
constexpr size_t kNumGpusRequired = 2;
int N = 0, nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-5f;
float *x;
float rhs = 1.0;
float r1;
float *r, *p, *Ax;
printf("Starting [%s]...\n", sSDKname);
auto gpusByArch = getIdenticalGPUs();
auto it = gpusByArch.begin();
auto end = gpusByArch.end();
auto bestFit = std::make_pair(it, it);
// use std::distance to find the largest number of GPUs amongst architectures
auto distance = [](decltype(bestFit) p){return std::distance(p.first, p.second);};
// Read each unique key/pair element in order
for (; it != end; it = gpusByArch.upper_bound(it->first)) {
// first and second are iterators bounded within the architecture group
auto testFit = gpusByArch.equal_range(it->first);
// Always use devices with highest architecture version or whichever has the most devices available
if (distance(bestFit) <= distance(testFit))
bestFit = testFit;
}
if (distance(bestFit) < kNumGpusRequired) {
printf(
"No Two or more GPUs with same architecture capable of "
"cooperativeMultiDeviceLaunch & concurrentManagedAccess found. "
"\nWaiving the sample\n");
exit(EXIT_WAIVED);
}
std::set<int> bestFitDeviceIds;
// Check for and select peer-to-peer capable GPU devices; enabling p2p access between
// participating GPUs gives better performance for multi_grid sync.
for (auto itr = bestFit.first; itr != bestFit.second; itr++) {
int deviceId = itr->second;
checkCudaErrors(hipSetDevice(deviceId));
std::for_each(itr, bestFit.second, [&deviceId, &bestFitDeviceIds, &kNumGpusRequired](decltype(*itr) mapPair) {
if (deviceId != mapPair.second)
{
int access = 0;
checkCudaErrors(hipDeviceCanAccessPeer(&access, deviceId, mapPair.second));
printf("Device=%d %s Access Peer Device=%d\n", deviceId, access ? "CAN" : "CANNOT", mapPair.second);
if (access && bestFitDeviceIds.size() < kNumGpusRequired) {
bestFitDeviceIds.emplace(deviceId);
bestFitDeviceIds.emplace(mapPair.second);
}
else {
printf("Ignoring device %i (max devices exceeded)\n", mapPair.second);
}
}
});
if (bestFitDeviceIds.size() >= kNumGpusRequired)
{
printf("Selected p2p capable devices - ");
for (auto devicesItr = bestFitDeviceIds.begin(); devicesItr != bestFitDeviceIds.end(); devicesItr++)
{
printf("deviceId = %d ", *devicesItr);
}
printf("\n");
break;
}
}
// If bestFitDeviceIds is still empty, the GPUs in the system are not p2p capable,
// so we add them without the p2p capability check.
if (!bestFitDeviceIds.size())
{
printf("Devices involved are not p2p capable.. selecting %zu of them\n", kNumGpusRequired);
std::for_each(bestFit.first, bestFit.second, [&bestFitDeviceIds, &kNumGpusRequired](decltype(*bestFit.first) mapPair) {
if (bestFitDeviceIds.size() < kNumGpusRequired) {
bestFitDeviceIds.emplace(mapPair.second);
}
else {
printf("Ignoring device %i (max devices exceeded)\n", mapPair.second);
}
// Insert the sequence into the deviceIds set
});
}
else
{
// Perform hipDeviceEnablePeerAccess in both directions for all participating devices of a
// multi-device cooperative launch; this gives better performance for multi_grid sync.
for (auto p1_itr = bestFitDeviceIds.begin(); p1_itr != bestFitDeviceIds.end(); p1_itr++)
{
checkCudaErrors(hipSetDevice(*p1_itr));
for (auto p2_itr = bestFitDeviceIds.begin(); p2_itr != bestFitDeviceIds.end(); p2_itr++)
{
if (*p1_itr != *p2_itr)
{
checkCudaErrors(hipDeviceEnablePeerAccess(*p2_itr, 0 ));
checkCudaErrors(hipSetDevice(*p1_itr));
}
}
}
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
N = 10485760 * 2;
nz = (N - 2) * 3 + 4;
checkCudaErrors(hipMallocManaged((void **)&I, sizeof(int) * (N + 1)));
checkCudaErrors(hipMallocManaged((void **)&J, sizeof(int) * nz));
checkCudaErrors(hipMallocManaged((void **)&val, sizeof(float) * nz));
float *val_cpu = (float *)malloc(sizeof(float) * nz);
genTridiag(I, J, val_cpu, N, nz);
memcpy(val, val_cpu, sizeof(float) * nz);
checkCudaErrors(
hipMemAdvise(I, sizeof(int) * (N + 1), hipMemAdviseSetReadMostly, 0));
checkCudaErrors(
hipMemAdvise(J, sizeof(int) * nz, hipMemAdviseSetReadMostly, 0));
checkCudaErrors(
hipMemAdvise(val, sizeof(float) * nz, hipMemAdviseSetReadMostly, 0));
checkCudaErrors(hipMallocManaged((void **)&x, sizeof(float) * N));
double *dot_result;
checkCudaErrors(hipMallocManaged((void **)&dot_result, sizeof(double)));
checkCudaErrors(hipMemset(dot_result, 0, sizeof(double)));
// temp memory for ConjugateGradient
checkCudaErrors(hipMallocManaged((void **)&r, N * sizeof(float)));
checkCudaErrors(hipMallocManaged((void **)&p, N * sizeof(float)));
checkCudaErrors(hipMallocManaged((void **)&Ax, N * sizeof(float)));
std::cout << "\nRunning on GPUs = " << kNumGpusRequired << std::endl;
hipStream_t nStreams[kNumGpusRequired];
int sMemSize = sizeof(double) * ((THREADS_PER_BLOCK/32) + 1);
int numBlocksPerSm = INT_MAX;
int numThreads = THREADS_PER_BLOCK;
int numSms = INT_MAX;
auto deviceId = bestFitDeviceIds.begin();
// set numSms & numBlocksPerSm to be lowest of 2 devices
while (deviceId != bestFitDeviceIds.end()) {
hipDeviceProp_t deviceProp;
checkCudaErrors(hipSetDevice(*deviceId));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, *deviceId));
int numBlocksPerSm_current=0;
checkCudaErrors(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&numBlocksPerSm_current, multiGpuConjugateGradient, numThreads, sMemSize));
if (numBlocksPerSm > numBlocksPerSm_current)
{
numBlocksPerSm = numBlocksPerSm_current;
}
if (numSms > deviceProp.multiProcessorCount)
{
numSms = deviceProp.multiProcessorCount;
}
deviceId++;
}
if (!numBlocksPerSm) {
printf(
"Max active blocks per SM is returned as 0.\n Hence, Waiving the "
"sample\n");
exit(EXIT_WAIVED);
}
int device_count = 0;
int totalThreadsPerGPU = numSms * numBlocksPerSm * THREADS_PER_BLOCK;
deviceId = bestFitDeviceIds.begin();
while (deviceId != bestFitDeviceIds.end()) {
checkCudaErrors(hipSetDevice(*deviceId));
checkCudaErrors(hipStreamCreate(&nStreams[device_count]));
int perGPUIter = N / (totalThreadsPerGPU * kNumGpusRequired);
int offset_Ax = device_count * totalThreadsPerGPU;
int offset_r = device_count * totalThreadsPerGPU;
int offset_p = device_count * totalThreadsPerGPU;
int offset_x = device_count * totalThreadsPerGPU;
checkCudaErrors(hipMemPrefetchAsync(I, sizeof(int) * N, *deviceId,
nStreams[device_count]));
checkCudaErrors(hipMemPrefetchAsync(val, sizeof(float) * nz, *deviceId,
nStreams[device_count]));
checkCudaErrors(hipMemPrefetchAsync(J, sizeof(float) * nz, *deviceId,
nStreams[device_count]));
if (offset_Ax <= N) {
for (int i = 0; i < perGPUIter; i++) {
hipMemAdvise(Ax + offset_Ax, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetPreferredLocation, *deviceId);
hipMemAdvise(r + offset_r, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetPreferredLocation, *deviceId);
hipMemAdvise(x + offset_x, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetPreferredLocation, *deviceId);
hipMemAdvise(p + offset_p, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetPreferredLocation, *deviceId);
hipMemAdvise(Ax + offset_Ax, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetAccessedBy, *deviceId);
hipMemAdvise(r + offset_r, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetAccessedBy, *deviceId);
hipMemAdvise(p + offset_p, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetAccessedBy, *deviceId);
hipMemAdvise(x + offset_x, sizeof(float) * totalThreadsPerGPU,
hipMemAdviseSetAccessedBy, *deviceId);
offset_Ax += totalThreadsPerGPU * kNumGpusRequired;
offset_r += totalThreadsPerGPU * kNumGpusRequired;
offset_p += totalThreadsPerGPU * kNumGpusRequired;
offset_x += totalThreadsPerGPU * kNumGpusRequired;
if (offset_Ax >= N) {
break;
}
}
}
device_count++;
deviceId++;
}
#if ENABLE_CPU_DEBUG_CODE
float *Ax_cpu = (float *)malloc(sizeof(float) * N);
float *r_cpu = (float *)malloc(sizeof(float) * N);
float *p_cpu = (float *)malloc(sizeof(float) * N);
float *x_cpu = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
r_cpu[i] = 1.0;
Ax_cpu[i] = x_cpu[i] = 0.0;
}
#endif
printf("Total threads per GPU = %d numBlocksPerSm = %d\n",
numSms * numBlocksPerSm * THREADS_PER_BLOCK, numBlocksPerSm);
dim3 dimGrid(numSms * numBlocksPerSm, 1, 1), dimBlock(THREADS_PER_BLOCK, 1, 1);
void *kernelArgs[] = {
(void *)&I, (void *)&J, (void *)&val, (void *)&x,
(void *)&Ax, (void *)&p, (void *)&r, (void *)&dot_result,
(void *)&nz, (void *)&N, (void *)&tol,
};
hipLaunchParams *launchParamsList = (hipLaunchParams *)malloc(
sizeof(hipLaunchParams) * kNumGpusRequired);
for (int i = 0; i < kNumGpusRequired; i++) {
launchParamsList[i].func = (void *)multiGpuConjugateGradient;
launchParamsList[i].gridDim = dimGrid;
launchParamsList[i].blockDim = dimBlock;
launchParamsList[i].sharedMem = sMemSize;
launchParamsList[i].stream = nStreams[i];
launchParamsList[i].args = kernelArgs;
}
printf("Launching kernel\n");
checkCudaErrors(hipLaunchCooperativeKernelMultiDevice(
launchParamsList, kNumGpusRequired,
hipCooperativeLaunchMultiDeviceNoPreSync |
hipCooperativeLaunchMultiDeviceNoPostSync));
checkCudaErrors(
hipMemPrefetchAsync(x, sizeof(float) * N, hipCpuDeviceId));
checkCudaErrors(
hipMemPrefetchAsync(dot_result, sizeof(double), hipCpuDeviceId));
deviceId = bestFitDeviceIds.begin();
device_count = 0;
while (deviceId != bestFitDeviceIds.end()) {
checkCudaErrors(hipSetDevice(*deviceId));
checkCudaErrors(hipStreamSynchronize(nStreams[device_count++]));
deviceId++;
}
r1 = (float)*dot_result;
printf("GPU Final, residual = %e \n ", sqrt(r1));
#if ENABLE_CPU_DEBUG_CODE
cpuConjugateGrad(I, J, val, x_cpu, Ax_cpu, p_cpu, r_cpu, nz, N, tol);
#endif
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++) {
rsum = 0.0;
for (int j = I[i]; j < I[i + 1]; j++) {
rsum += val_cpu[j] * x[J[j]];
}
diff = fabs(rsum - rhs);
if (diff > err) {
err = diff;
}
}
checkCudaErrors(hipFree(I));
checkCudaErrors(hipFree(J));
checkCudaErrors(hipFree(val));
checkCudaErrors(hipFree(x));
checkCudaErrors(hipFree(r));
checkCudaErrors(hipFree(p));
checkCudaErrors(hipFree(Ax));
checkCudaErrors(hipFree(dot_result));
free(val_cpu);
#if ENABLE_CPU_DEBUG_CODE
free(Ax_cpu);
free(r_cpu);
free(p_cpu);
free(x_cpu);
#endif
printf("Test Summary: Error amount = %f \n", err);
fprintf(stdout, "&&&& conjugateGradientMultiDeviceCG %s\n",
(sqrt(r1) < tol) ? "PASSED" : "FAILED");
exit((sqrt(r1) < tol) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
0f749c217fa328582f2ffdfdda5b840da1ed09bf.cu
|
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on multiple GPUs using
* Multi Device Cooperative Groups. It also uses Unified Memory, optimized with
* prefetching and usage hints.
*
*/
// includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <map>
#include <iostream>
#include <set>
#include <utility>
#include <cuda_runtime.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;
const char *sSDKname = "conjugateGradientMultiDeviceCG";
#define ENABLE_CPU_DEBUG_CODE 0
#define THREADS_PER_BLOCK 512
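// Per-device accumulator for the dot product: each block adds its partial sum here, and
// thread 0 of each grid folds the total into the managed-memory dot_result and resets it.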
__device__ double grid_dot_result = 0.0;
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz) {
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)rand() / RAND_MAX + 10.0f;
val[1] = (float)rand() / RAND_MAX;
int start;
for (int i = 1; i < N; i++) {
if (i > 1) {
I[i] = I[i - 1] + 3;
} else {
I[1] = 2;
}
start = (i - 1) * 3 + 2;
J[start] = i - 1;
J[start + 1] = i;
if (i < N - 1) {
J[start + 2] = i + 1;
}
val[start] = val[start - 1];
val[start + 1] = (float)rand() / RAND_MAX + 10.0f;
if (i < N - 1) {
val[start + 2] = (float)rand() / RAND_MAX;
}
}
I[N] = nz;
}
// I - CSR row pointers: index into J/val of the first non-zero element of each row
// J - column index of each non-zero element
// val - values of the non-zero elements of the matrix
// inputVecX - input vector to be multiplied
// outputVecY - resultant vector
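// Illustrative CSR example (not part of the original sample): for the 3x3 tridiagonal
// matrix [[2,1,0],[1,2,1],[0,1,2]] the arrays would be
//   I   = {0, 2, 5, 7}
//   J   = {0, 1, 0, 1, 2, 1, 2}
//   val = {2, 1, 1, 2, 1, 1, 2}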
void cpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha,
float *inputVecX, float *outputVecY) {
for (int i = 0; i < num_rows; i++) {
int num_elems_this_row = I[i + 1] - I[i];
float output = 0.0;
for (int j = 0; j < num_elems_this_row; j++) {
output += alpha * val[I[i] + j] * inputVecX[J[I[i] + j]];
}
outputVecY[i] = output;
}
return;
}
float dotProduct(float *vecA, float *vecB, int size) {
float result = 0.0;
for (int i = 0; i < size; i++) {
result = result + (vecA[i] * vecB[i]);
}
return result;
}
void scaleVector(float *vec, float alpha, int size) {
for (int i = 0; i < size; i++) {
vec[i] = alpha * vec[i];
}
}
void saxpy(float *x, float *y, float a, int size) {
for (int i = 0; i < size; i++) {
y[i] = a * x[i] + y[i];
}
}
void cpuConjugateGrad(int *I, int *J, float *val, float *x, float *Ax, float *p,
float *r, int nnz, int N, float tol) {
int max_iter = 10000;
float alpha = 1.0;
float alpham1 = -1.0;
float r0 = 0.0, b, a, na;
cpuSpMV(I, J, val, nnz, N, alpha, x, Ax);
saxpy(Ax, r, alpham1, N);
float r1 = dotProduct(r, r, N);
int k = 1;
while (r1 > tol * tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
scaleVector(p, b, N);
saxpy(r, p, alpha, N);
} else {
for (int i = 0; i < N; i++) p[i] = r[i];
}
cpuSpMV(I, J, val, nnz, N, alpha, p, Ax);
float dot = dotProduct(p, Ax, N);
a = r1 / dot;
saxpy(p, x, a, N);
na = -a;
saxpy(Ax, r, na, N);
r0 = r1;
r1 = dotProduct(r, r, N);
printf("\nCPU code iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
}
__device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows,
float alpha, float *inputVecX, float *outputVecY,
cg::thread_block &cta,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < num_rows; i += multi_grid.size()) {
int row_elem = I[i];
int next_row_elem = I[i + 1];
int num_elems_this_row = next_row_elem - row_elem;
float output = 0.0;
for (int j = 0; j < num_elems_this_row; j++) {
output += alpha * val[row_elem + j] * inputVecX[J[row_elem + j]];
}
outputVecY[i] = output;
}
}
__device__ void gpuSaxpy(float *x, float *y, float a, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
y[i] = a * x[i] + y[i];
}
}
__device__ void gpuDotProduct(float *vecA, float *vecB, int size,
const cg::thread_block &cta,
const cg::multi_grid_group &multi_grid) {
extern __shared__ double tmp[];
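// One shared-memory slot per 32-thread tile: each tile's leader stores its partial sum,
// then the first tile reduces those partials and adds the block total to grid_dot_result.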
double temp_sum = 0.0;
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
temp_sum += (double)(vecA[i] * vecB[i]);
}
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>());
if (tile32.thread_rank() == 0) {
tmp[tile32.meta_group_rank()] = temp_sum;
}
cg::sync(cta);
if (tile32.meta_group_rank() == 0) {
temp_sum = tile32.thread_rank() < tile32.meta_group_size() ? tmp[tile32.thread_rank()] : 0.0;
temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>());
if (tile32.thread_rank() == 0) {
atomicAdd(&grid_dot_result, temp_sum);
}
}
}
__device__ void gpuCopyVector(float *srcA, float *destB, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
destB[i] = srcA[i];
}
}
__device__ void gpuScaleVectorAndSaxpy(float *x, float *y, float a, float scale, int size,
const cg::multi_grid_group &multi_grid) {
for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
y[i] = a * x[i] + scale * y[i];
}
}
extern "C" __global__ void multiGpuConjugateGradient(
int *I, int *J, float *val, float *x, float *Ax, float *p, float *r,
double *dot_result, int nnz, int N, float tol) {
cg::thread_block cta = cg::this_thread_block();
cg::grid_group grid = cg::this_grid();
cg::multi_grid_group multi_grid = cg::this_multi_grid();
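// cg::sync(grid) synchronizes all blocks on this device; cg::sync(multi_grid) synchronizes
// all participating devices. dot_result lives in managed memory and is shared across devices.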
const int max_iter = 10000;
float alpha = 1.0;
float alpham1 = -1.0;
float r0 = 0.0, r1, b, a, na;
for (int i = multi_grid.thread_rank(); i < N; i += multi_grid.size()) {
r[i] = 1.0;
x[i] = 0.0;
}
cg::sync(grid);
gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, cta, multi_grid);
cg::sync(grid);
gpuSaxpy(Ax, r, alpham1, N, multi_grid);
cg::sync(grid);
gpuDotProduct(r, r, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
r1 = *dot_result;
int k = 1;
while (r1 > tol * tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
gpuScaleVectorAndSaxpy(r, p, alpha, b, N, multi_grid);
} else {
gpuCopyVector(r, p, N, multi_grid);
}
cg::sync(multi_grid);
gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, cta, multi_grid);
if (multi_grid.thread_rank() == 0) {
*dot_result = 0.0;
}
cg::sync(multi_grid);
gpuDotProduct(p, Ax, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
a = r1 / *dot_result;
gpuSaxpy(p, x, a, N, multi_grid);
na = -a;
gpuSaxpy(Ax, r, na, N, multi_grid);
r0 = r1;
cg::sync(multi_grid);
if (multi_grid.thread_rank() == 0) {
*dot_result = 0.0;
}
cg::sync(multi_grid);
gpuDotProduct(r, r, N, cta, multi_grid);
cg::sync(grid);
if (grid.thread_rank() == 0) {
atomicAdd_system(dot_result, grid_dot_result);
grid_dot_result = 0.0;
}
cg::sync(multi_grid);
r1 = *dot_result;
k++;
}
}
// Returns a multimap from compute capability (major, minor) to device ordinal, keeping only
// devices that support cooperativeMultiDeviceLaunch and concurrentManagedAccess.
std::multimap<std::pair<int, int>, int> getIdenticalGPUs() {
int numGpus = 0;
checkCudaErrors(cudaGetDeviceCount(&numGpus));
std::multimap<std::pair<int, int>, int> identicalGpus;
for (int i = 0; i < numGpus; i++) {
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, i));
// Filter unsupported devices
if (deviceProp.cooperativeMultiDeviceLaunch &&
deviceProp.concurrentManagedAccess) {
identicalGpus.emplace(std::make_pair(deviceProp.major, deviceProp.minor), i);
}
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", i,
deviceProp.name, deviceProp.major, deviceProp.minor);
}
return identicalGpus;
}
int main(int argc, char **argv) {
constexpr size_t kNumGpusRequired = 2;
int N = 0, nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-5f;
float *x;
float rhs = 1.0;
float r1;
float *r, *p, *Ax;
printf("Starting [%s]...\n", sSDKname);
auto gpusByArch = getIdenticalGPUs();
auto it = gpusByArch.begin();
auto end = gpusByArch.end();
auto bestFit = std::make_pair(it, it);
// use std::distance to find the largest number of GPUs amongst architectures
auto distance = [](decltype(bestFit) p){return std::distance(p.first, p.second);};
// Read each unique key/pair element in order
for (; it != end; it = gpusByArch.upper_bound(it->first)) {
// first and second are iterators bounded within the architecture group
auto testFit = gpusByArch.equal_range(it->first);
// Always use devices with highest architecture version or whichever has the most devices available
if (distance(bestFit) <= distance(testFit))
bestFit = testFit;
}
if (distance(bestFit) < kNumGpusRequired) {
printf(
"No Two or more GPUs with same architecture capable of "
"cooperativeMultiDeviceLaunch & concurrentManagedAccess found. "
"\nWaiving the sample\n");
exit(EXIT_WAIVED);
}
std::set<int> bestFitDeviceIds;
// Check for and select peer-to-peer capable GPU devices; enabling p2p access between
// participating GPUs gives better performance for multi_grid sync.
for (auto itr = bestFit.first; itr != bestFit.second; itr++) {
int deviceId = itr->second;
checkCudaErrors(cudaSetDevice(deviceId));
std::for_each(itr, bestFit.second, [&deviceId, &bestFitDeviceIds, &kNumGpusRequired](decltype(*itr) mapPair) {
if (deviceId != mapPair.second)
{
int access = 0;
checkCudaErrors(cudaDeviceCanAccessPeer(&access, deviceId, mapPair.second));
printf("Device=%d %s Access Peer Device=%d\n", deviceId, access ? "CAN" : "CANNOT", mapPair.second);
if (access && bestFitDeviceIds.size() < kNumGpusRequired) {
bestFitDeviceIds.emplace(deviceId);
bestFitDeviceIds.emplace(mapPair.second);
}
else {
printf("Ignoring device %i (max devices exceeded)\n", mapPair.second);
}
}
});
if (bestFitDeviceIds.size() >= kNumGpusRequired)
{
printf("Selected p2p capable devices - ");
for (auto devicesItr = bestFitDeviceIds.begin(); devicesItr != bestFitDeviceIds.end(); devicesItr++)
{
printf("deviceId = %d ", *devicesItr);
}
printf("\n");
break;
}
}
// If bestFitDeviceIds is still empty, the GPUs in the system are not p2p capable,
// so we add them without the p2p capability check.
if (!bestFitDeviceIds.size())
{
printf("Devices involved are not p2p capable.. selecting %zu of them\n", kNumGpusRequired);
std::for_each(bestFit.first, bestFit.second, [&bestFitDeviceIds, &kNumGpusRequired](decltype(*bestFit.first) mapPair) {
if (bestFitDeviceIds.size() < kNumGpusRequired) {
bestFitDeviceIds.emplace(mapPair.second);
}
else {
printf("Ignoring device %i (max devices exceeded)\n", mapPair.second);
}
// Insert the sequence into the deviceIds set
});
}
else
{
// Perform cudaDeviceEnablePeerAccess in both directions for all participating devices of a
// cudaLaunchCooperativeKernelMultiDevice call; this gives better performance for multi_grid sync.
for (auto p1_itr = bestFitDeviceIds.begin(); p1_itr != bestFitDeviceIds.end(); p1_itr++)
{
checkCudaErrors(cudaSetDevice(*p1_itr));
for (auto p2_itr = bestFitDeviceIds.begin(); p2_itr != bestFitDeviceIds.end(); p2_itr++)
{
if (*p1_itr != *p2_itr)
{
checkCudaErrors(cudaDeviceEnablePeerAccess(*p2_itr, 0 ));
checkCudaErrors(cudaSetDevice(*p1_itr));
}
}
}
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
N = 10485760 * 2;
nz = (N - 2) * 3 + 4;
checkCudaErrors(cudaMallocManaged((void **)&I, sizeof(int) * (N + 1)));
checkCudaErrors(cudaMallocManaged((void **)&J, sizeof(int) * nz));
checkCudaErrors(cudaMallocManaged((void **)&val, sizeof(float) * nz));
float *val_cpu = (float *)malloc(sizeof(float) * nz);
genTridiag(I, J, val_cpu, N, nz);
memcpy(val, val_cpu, sizeof(float) * nz);
checkCudaErrors(
cudaMemAdvise(I, sizeof(int) * (N + 1), cudaMemAdviseSetReadMostly, 0));
checkCudaErrors(
cudaMemAdvise(J, sizeof(int) * nz, cudaMemAdviseSetReadMostly, 0));
checkCudaErrors(
cudaMemAdvise(val, sizeof(float) * nz, cudaMemAdviseSetReadMostly, 0));
checkCudaErrors(cudaMallocManaged((void **)&x, sizeof(float) * N));
double *dot_result;
checkCudaErrors(cudaMallocManaged((void **)&dot_result, sizeof(double)));
checkCudaErrors(cudaMemset(dot_result, 0, sizeof(double)));
// temp memory for ConjugateGradient
checkCudaErrors(cudaMallocManaged((void **)&r, N * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void **)&p, N * sizeof(float)));
checkCudaErrors(cudaMallocManaged((void **)&Ax, N * sizeof(float)));
std::cout << "\nRunning on GPUs = " << kNumGpusRequired << std::endl;
cudaStream_t nStreams[kNumGpusRequired];
int sMemSize = sizeof(double) * ((THREADS_PER_BLOCK/32) + 1);
int numBlocksPerSm = INT_MAX;
int numThreads = THREADS_PER_BLOCK;
int numSms = INT_MAX;
auto deviceId = bestFitDeviceIds.begin();
// set numSms & numBlocksPerSm to be lowest of 2 devices
while (deviceId != bestFitDeviceIds.end()) {
cudaDeviceProp deviceProp;
checkCudaErrors(cudaSetDevice(*deviceId));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, *deviceId));
int numBlocksPerSm_current=0;
checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&numBlocksPerSm_current, multiGpuConjugateGradient, numThreads, sMemSize));
if (numBlocksPerSm > numBlocksPerSm_current)
{
numBlocksPerSm = numBlocksPerSm_current;
}
if (numSms > deviceProp.multiProcessorCount)
{
numSms = deviceProp.multiProcessorCount;
}
deviceId++;
}
if (!numBlocksPerSm) {
printf(
"Max active blocks per SM is returned as 0.\n Hence, Waiving the "
"sample\n");
exit(EXIT_WAIVED);
}
int device_count = 0;
int totalThreadsPerGPU = numSms * numBlocksPerSm * THREADS_PER_BLOCK;
deviceId = bestFitDeviceIds.begin();
while (deviceId != bestFitDeviceIds.end()) {
checkCudaErrors(cudaSetDevice(*deviceId));
checkCudaErrors(cudaStreamCreate(&nStreams[device_count]));
int perGPUIter = N / (totalThreadsPerGPU * kNumGpusRequired);
int offset_Ax = device_count * totalThreadsPerGPU;
int offset_r = device_count * totalThreadsPerGPU;
int offset_p = device_count * totalThreadsPerGPU;
int offset_x = device_count * totalThreadsPerGPU;
checkCudaErrors(cudaMemPrefetchAsync(I, sizeof(int) * N, *deviceId,
nStreams[device_count]));
checkCudaErrors(cudaMemPrefetchAsync(val, sizeof(float) * nz, *deviceId,
nStreams[device_count]));
checkCudaErrors(cudaMemPrefetchAsync(J, sizeof(float) * nz, *deviceId,
nStreams[device_count]));
if (offset_Ax <= N) {
for (int i = 0; i < perGPUIter; i++) {
cudaMemAdvise(Ax + offset_Ax, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetPreferredLocation, *deviceId);
cudaMemAdvise(r + offset_r, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetPreferredLocation, *deviceId);
cudaMemAdvise(x + offset_x, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetPreferredLocation, *deviceId);
cudaMemAdvise(p + offset_p, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetPreferredLocation, *deviceId);
cudaMemAdvise(Ax + offset_Ax, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetAccessedBy, *deviceId);
cudaMemAdvise(r + offset_r, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetAccessedBy, *deviceId);
cudaMemAdvise(p + offset_p, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetAccessedBy, *deviceId);
cudaMemAdvise(x + offset_x, sizeof(float) * totalThreadsPerGPU,
cudaMemAdviseSetAccessedBy, *deviceId);
offset_Ax += totalThreadsPerGPU * kNumGpusRequired;
offset_r += totalThreadsPerGPU * kNumGpusRequired;
offset_p += totalThreadsPerGPU * kNumGpusRequired;
offset_x += totalThreadsPerGPU * kNumGpusRequired;
if (offset_Ax >= N) {
break;
}
}
}
device_count++;
deviceId++;
}
#if ENABLE_CPU_DEBUG_CODE
float *Ax_cpu = (float *)malloc(sizeof(float) * N);
float *r_cpu = (float *)malloc(sizeof(float) * N);
float *p_cpu = (float *)malloc(sizeof(float) * N);
float *x_cpu = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
r_cpu[i] = 1.0;
Ax_cpu[i] = x_cpu[i] = 0.0;
}
#endif
printf("Total threads per GPU = %d numBlocksPerSm = %d\n",
numSms * numBlocksPerSm * THREADS_PER_BLOCK, numBlocksPerSm);
dim3 dimGrid(numSms * numBlocksPerSm, 1, 1), dimBlock(THREADS_PER_BLOCK, 1, 1);
void *kernelArgs[] = {
(void *)&I, (void *)&J, (void *)&val, (void *)&x,
(void *)&Ax, (void *)&p, (void *)&r, (void *)&dot_result,
(void *)&nz, (void *)&N, (void *)&tol,
};
cudaLaunchParams *launchParamsList = (cudaLaunchParams *)malloc(
sizeof(cudaLaunchParams) * kNumGpusRequired);
for (int i = 0; i < kNumGpusRequired; i++) {
launchParamsList[i].func = (void *)multiGpuConjugateGradient;
launchParamsList[i].gridDim = dimGrid;
launchParamsList[i].blockDim = dimBlock;
launchParamsList[i].sharedMem = sMemSize;
launchParamsList[i].stream = nStreams[i];
launchParamsList[i].args = kernelArgs;
}
printf("Launching kernel\n");
checkCudaErrors(cudaLaunchCooperativeKernelMultiDevice(
launchParamsList, kNumGpusRequired,
cudaCooperativeLaunchMultiDeviceNoPreSync |
cudaCooperativeLaunchMultiDeviceNoPostSync));
checkCudaErrors(
cudaMemPrefetchAsync(x, sizeof(float) * N, cudaCpuDeviceId));
checkCudaErrors(
cudaMemPrefetchAsync(dot_result, sizeof(double), cudaCpuDeviceId));
deviceId = bestFitDeviceIds.begin();
device_count = 0;
while (deviceId != bestFitDeviceIds.end()) {
checkCudaErrors(cudaSetDevice(*deviceId));
checkCudaErrors(cudaStreamSynchronize(nStreams[device_count++]));
deviceId++;
}
r1 = (float)*dot_result;
printf("GPU Final, residual = %e \n ", sqrt(r1));
#if ENABLE_CPU_DEBUG_CODE
cpuConjugateGrad(I, J, val, x_cpu, Ax_cpu, p_cpu, r_cpu, nz, N, tol);
#endif
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++) {
rsum = 0.0;
for (int j = I[i]; j < I[i + 1]; j++) {
rsum += val_cpu[j] * x[J[j]];
}
diff = fabs(rsum - rhs);
if (diff > err) {
err = diff;
}
}
checkCudaErrors(cudaFree(I));
checkCudaErrors(cudaFree(J));
checkCudaErrors(cudaFree(val));
checkCudaErrors(cudaFree(x));
checkCudaErrors(cudaFree(r));
checkCudaErrors(cudaFree(p));
checkCudaErrors(cudaFree(Ax));
checkCudaErrors(cudaFree(dot_result));
free(val_cpu);
#if ENABLE_CPU_DEBUG_CODE
free(Ax_cpu);
free(r_cpu);
free(p_cpu);
free(x_cpu);
#endif
printf("Test Summary: Error amount = %f \n", err);
fprintf(stdout, "&&&& conjugateGradientMultiDeviceCG %s\n",
(sqrt(r1) < tol) ? "PASSED" : "FAILED");
exit((sqrt(r1) < tol) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
711174040bae323381476cc67df0eac2ebf11a10.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
void image_save(int *original, int rows, int cols, char* name)
{
unsigned char * image_final = (unsigned char*)calloc(rows*cols, sizeof(char)) ;
for(int i = 1 ; i < rows ; i++) {
for (int j = 1 ; j < cols ; j++ ) {
image_final[(i-1)*cols + j-1] = (unsigned char)original[(i-1)*cols + j-1] ;
}
}
stbi_write_png(name, rows, cols, 1, (const void*)image_final, rows);
}
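// Adaptive Wiener-style noise filter: image_filter = image - (var / local_variance) * (image - local_mean),
// where local_variance is first clamped from below by the global variance (all integer arithmetic).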
__global__ void get_global_variance(int *local_variance, int *local_mean, int *image, int *image_filter, int *variance) {
int cols = blockDim.x ;
int var = *variance;
int r = blockIdx.x;
int c = threadIdx.x;
if(local_variance[r * cols + c] < var)
local_variance[r * cols + c] = var;
image_filter[r * cols + c] = image[r * cols + c] - (var / local_variance[r * cols + c]) * (image[r * cols + c] - local_mean[r * cols + c]);
}
int get_sum2(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__global__ void square_matrix2(int *image, int *image_sq ) {
int row_id = blockIdx.x ;
int col_id = threadIdx.x ;
int columns = blockDim.x ;
int sum = 0 ;
for(int k = 0; k < columns ; k++)
sum = sum + image[row_id*columns + k]*image[col_id*columns + k] ;
image_sq[row_id *columns + col_id] = sum ;
}
__device__ void square_matrix1(int *mat,int *result ,int rows, int cols) {
int temp_sum = 0 ;
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
temp_sum = 0 ;
for(int k = 0; k < cols; k++)
temp_sum = temp_sum + mat[i*cols + k] * mat[j*cols + k] ;
result[i*cols + j] = temp_sum ;
}
}
}
__device__ int get_sum(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__device__ void get_neighbours(int *image, int *near , int curr_row, int curr_col, int cols) {
int next = 0;
for(int i = curr_row - 1; i < curr_row + 2; i++) {
for(int j = curr_col - 1; j < curr_col + 2; j++) {
near[next] = image[i * cols + j];
next++;
}
}
}
__global__ void compute_local_mean_variance(int *image_pad, int *local_mean, int *local_variance) {
int r = blockIdx.x;
int c = threadIdx.x;
int columns = blockDim.x ;
int near_sq[9] ;
int near[9] ;
if(r != 0 && c != 0) {
get_neighbours(image_pad,near,r, c, columns + 1);
int curr_mean = get_sum(near, 3, 3) / 9;
local_mean[(r - 1) *columns + (c - 1)] = curr_mean;
square_matrix1(near, near_sq , 3, 3);
local_variance[(r - 1) * columns + (c - 1)] = get_sum(near_sq, 3, 3) / 9 - curr_mean;
}
}
__global__ void image_padding(int *image, int *image_pad) {
int r = blockIdx.x;
int c = threadIdx.x;
int rows = gridDim.x ;
int cols = blockDim.x ;
if(r != 0 && c != 0 && r != rows - 1 && c != cols - 1)
image_pad[r*cols + c] = image[(r - 1)*(cols - 1) + c - 1];
else
image_pad[r*cols + c] = 0;
}
__global__ void loadIMG(char *temp_image, int *image) {
int r = blockIdx.x;
int c = threadIdx.x;
int cols = blockDim.x ;
image[r *cols + c] = (int) temp_image[r *cols + c];
}
__global__ void sobel_horizontal(int *image_final, int *image_pad, int *sobel) {
int cols = blockDim.x ;
int rows = gridDim.x ;
int r = blockIdx.x;
int c = threadIdx.x;
int temp = 0;
int near[9] ;
if(r > 0 && c > 0 && r < rows - 1 && c < cols - 1 ) {
get_neighbours(image_pad, near,r, c, cols);
for(int k = 0; k < 9; k++)
temp += near[k] * sobel[k];
image_final[(r - 1)*(cols-1) + (c - 1)] = temp;
}
}
void err(int checker) {
hipError_t errchck = hipGetLastError() ;
if (errchck != hipSuccess )
printf(" %d %s \n" , checker , hipGetErrorString(errchck ) ) ;
}
int main() {
int variance, rows, cols, bpp;
char name[100] ;
// 1) Read the image
unsigned char *temp_image = stbi_load("logo.png", &rows, &cols, &bpp, 1);
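// Note: stbi_load returns the width in its second argument and the height in its third,
// so here `rows` holds the image width and `cols` holds the image height.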
int image[rows * cols];
// Parallel conversion of char image to int image
int *p_image;
char *p_temp_image;
int checkers = 0 ;
hipMalloc((void **)&p_image, sizeof(int) * rows * cols);
hipMalloc((void **)&p_temp_image, sizeof(char) * rows * cols);
hipMemcpy(p_temp_image, temp_image, sizeof(char) * rows * cols, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( loadIMG), dim3(rows), dim3(cols), 0, 0, p_temp_image, p_image);
// Copy the converted image back so the host-side `image` array (read by get_sum2 below) is initialized.
hipMemcpy(image, p_image, sizeof(int) * rows * cols, hipMemcpyDeviceToHost);
// Declarations
int *image_sq = (int *)malloc(sizeof(int) * rows * cols);
int sobel[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
int image_filter[rows * cols];
// 2) Padding the Image
int *p_image_pad;
hipMalloc((void **)&p_image_pad, sizeof(int) * (rows + 1) * (cols + 1));
rows += 1;
cols += 1 ;
hipLaunchKernelGGL(( image_padding), dim3(rows),dim3(cols), 0, 0, p_image, p_image_pad);
err(100) ;
rows -= 1;
cols -= 1;
// 3) Computing Local Mean and Local Variance
int *p_local_mean, *p_local_variance;
hipMalloc((void **)&p_local_mean, sizeof(int)*rows*cols);
hipMalloc((void **)&p_local_variance, sizeof(int)*rows*cols);
hipLaunchKernelGGL(( compute_local_mean_variance), dim3(rows), dim3(cols), 0, 0, p_image_pad, p_local_mean, p_local_variance);
// 4) Get Global Variance
int *p_image_sq;
hipMalloc((void **)&p_image_sq, sizeof(int) * rows * cols);
hipLaunchKernelGGL(( square_matrix2), dim3(rows), dim3(cols), 0, 0, p_image, p_image_sq);
hipMemcpy(image_sq, p_image_sq, sizeof(int) * rows * cols, hipMemcpyDeviceToHost);
hipFree(p_image_sq);
// Get Sum2 Function doesn't need to be parallelized
variance = get_sum2(image_sq , 3, 3) - get_sum2(image , 3 , 3) ;
variance = variance / (rows * cols);
int *p_image_filter, *p_variance;
hipMalloc((void **)&p_image_filter, sizeof(int) * rows * cols);
hipMalloc((void **)&p_variance, sizeof(int));
hipMemcpy(p_variance, &variance, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( get_global_variance), dim3(rows), dim3(cols), 0, 0, p_local_variance, p_local_mean, p_image, p_image_filter, p_variance);
hipMemcpy(image_filter, p_image_filter, sizeof(int) * rows * cols, hipMemcpyDeviceToHost);
strcpy(name, "noise_removed.png");
image_save(image_filter, rows, cols, name);
hipDeviceSynchronize() ;
// 5) Apply horizontal sobel filter for edge detection
rows += 1; /* Investigate this further */
cols += 1;
hipLaunchKernelGGL(( image_padding), dim3(rows), dim3(cols), 0, 0, p_image_filter, p_image_pad);
hipDeviceSynchronize() ;
rows -= 1;
cols -= 1;
hipFree(p_local_variance);
hipFree(p_local_mean);
hipFree(p_image);
int image_final[rows*cols] ;
int *p_image_final;
int *p_sobel;
hipMalloc((void **)&p_image_final, sizeof(int)*rows*cols);
hipMalloc((void **)&p_sobel, sizeof(int) * 9);
hipMemcpy(p_sobel, sobel, sizeof(int) * 9, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sobel_horizontal), dim3(rows+1), dim3(cols+1), 0, 0, p_image_final, p_image_pad, p_sobel);
hipMemcpy(image_final, p_image_final, sizeof(int)*rows*cols, hipMemcpyDeviceToHost);
err(checkers++) ;
printf("\n\nFunction 5.2 , %d \n\n" , checkers);
strcpy(name, "final_image.png");
image_save(image_final, rows, cols, name);
printf(" Processing complete , open final_image.png to see results \n");
hipFree(p_sobel);
hipFree(p_image_pad);
hipFree(p_image_final);
return 0 ;
}
|
711174040bae323381476cc67df0eac2ebf11a10.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
void image_save(int *original, int rows, int cols, char* name)
{
unsigned char * image_final = (unsigned char*)calloc(rows*cols, sizeof(char)) ;
for(int i = 1 ; i < rows ; i++) {
for (int j = 1 ; j < cols ; j++ ) {
image_final[(i-1)*cols + j-1] = (unsigned char)original[(i-1)*cols + j-1] ;
}
}
stbi_write_png(name, rows, cols, 1, (const void*)image_final, rows);
}
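// Adaptive Wiener-style noise filter: image_filter = image - (var / local_variance) * (image - local_mean),
// where local_variance is first clamped from below by the global variance (all integer arithmetic).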
__global__ void get_global_variance(int *local_variance, int *local_mean, int *image, int *image_filter, int *variance) {
int cols = blockDim.x ;
int var = *variance;
int r = blockIdx.x;
int c = threadIdx.x;
if(local_variance[r * cols + c] < var)
local_variance[r * cols + c] = var;
image_filter[r * cols + c] = image[r * cols + c] - (var / local_variance[r * cols + c]) * (image[r * cols + c] - local_mean[r * cols + c]);
}
int get_sum2(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__global__ void square_matrix2(int *image, int *image_sq ) {
int row_id = blockIdx.x ;
int col_id = threadIdx.x ;
int columns = blockDim.x ;
int sum = 0 ;
for(int k = 0; k < columns ; k++)
sum = sum + image[row_id*columns + k]*image[col_id*columns + k] ;
image_sq[row_id *columns + col_id] = sum ;
}
__device__ void square_matrix1(int *mat,int *result ,int rows, int cols) {
int temp_sum = 0 ;
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
temp_sum = 0 ;
for(int k = 0; k < cols; k++)
temp_sum = temp_sum + mat[i*cols + k] * mat[j*cols + k] ;
result[i*cols + j] = temp_sum ;
}
}
}
__device__ int get_sum(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__device__ void get_neighbours(int *image, int *near , int curr_row, int curr_col, int cols) {
int next = 0;
for(int i = curr_row - 1; i < curr_row + 2; i++) {
for(int j = curr_col - 1; j < curr_col + 2; j++) {
near[next] = image[i * cols + j];
next++;
}
}
}
__global__ void compute_local_mean_variance(int *image_pad, int *local_mean, int *local_variance) {
int r = blockIdx.x;
int c = threadIdx.x;
int columns = blockDim.x ;
int near_sq[9] ;
int near[9] ;
if(r != 0 && c != 0) {
get_neighbours(image_pad,near,r, c, columns + 1);
int curr_mean = get_sum(near, 3, 3) / 9;
local_mean[(r - 1) *columns + (c - 1)] = curr_mean;
square_matrix1(near, near_sq , 3, 3);
local_variance[(r - 1) * columns + (c - 1)] = get_sum(near_sq, 3, 3) / 9 - curr_mean;
}
}
__global__ void image_padding(int *image, int *image_pad) {
int r = blockIdx.x;
int c = threadIdx.x;
int rows = gridDim.x ;
int cols = blockDim.x ;
if(r != 0 && c != 0 && r != rows - 1 && c != cols - 1)
image_pad[r*cols + c] = image[(r - 1)*(cols - 1) + c - 1];
else
image_pad[r*cols + c] = 0;
}
__global__ void loadIMG(char *temp_image, int *image) {
int r = blockIdx.x;
int c = threadIdx.x;
int cols = blockDim.x ;
image[r *cols + c] = (int) temp_image[r *cols + c];
}
__global__ void sobel_horizontal(int *image_final, int *image_pad, int *sobel) {
int cols = blockDim.x ;
int rows = gridDim.x ;
int r = blockIdx.x;
int c = threadIdx.x;
int temp = 0;
int near[9] ;
if(r > 0 && c > 0 && r < rows - 1 && c < cols - 1 ) {
get_neighbours(image_pad, near,r, c, cols);
for(int k = 0; k < 9; k++)
temp += near[k] * sobel[k];
image_final[(r - 1)*(cols-1) + (c - 1)] = temp;
}
}
void err(int checker) {
cudaError_t errchck = cudaGetLastError() ;
if (errchck != cudaSuccess )
printf(" %d %s \n" , checker , cudaGetErrorString(errchck ) ) ;
}
int main() {
int variance, rows, cols, bpp;
char name[100] ;
// 1) Read the image
unsigned char *temp_image = stbi_load("logo.png", &rows, &cols, &bpp, 1);
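// Note: stbi_load returns the width in its second argument and the height in its third,
// so here `rows` holds the image width and `cols` holds the image height.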
int image[rows * cols];
// Parallel conversion of char image to int image
int *p_image;
char *p_temp_image;
int checkers = 0 ;
cudaMalloc((void **)&p_image, sizeof(int) * rows * cols);
cudaMalloc((void **)&p_temp_image, sizeof(char) * rows * cols);
cudaMemcpy(p_temp_image, temp_image, sizeof(char) * rows * cols, cudaMemcpyHostToDevice);
loadIMG<<<rows, cols>>>(p_temp_image, p_image);
// Copy the converted image back so the host-side `image` array (read by get_sum2 below) is initialized.
cudaMemcpy(image, p_image, sizeof(int) * rows * cols, cudaMemcpyDeviceToHost);
// Declarations
int *image_sq = (int *)malloc(sizeof(int) * rows * cols);
int sobel[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
int image_filter[rows * cols];
// 2) Padding the Image
int *p_image_pad;
cudaMalloc((void **)&p_image_pad, sizeof(int) * (rows + 1) * (cols + 1));
rows += 1;
cols += 1 ;
image_padding<<<rows,cols>>>(p_image, p_image_pad);
err(100) ;
rows -= 1;
cols -= 1;
// 3) Computing Local Mean and Local Variance
int *p_local_mean, *p_local_variance;
cudaMalloc((void **)&p_local_mean, sizeof(int)*rows*cols);
cudaMalloc((void **)&p_local_variance, sizeof(int)*rows*cols);
compute_local_mean_variance<<<rows, cols>>>(p_image_pad, p_local_mean, p_local_variance);
// 4) Get Global Variance
int *p_image_sq;
cudaMalloc((void **)&p_image_sq, sizeof(int) * rows * cols);
square_matrix2<<<rows, cols>>>(p_image, p_image_sq);
cudaMemcpy(image_sq, p_image_sq, sizeof(int) * rows * cols, cudaMemcpyDeviceToHost);
cudaFree(p_image_sq);
// Get Sum2 Function doesn't need to be parallelized
variance = get_sum2(image_sq , 3, 3) - get_sum2(image , 3 , 3) ;
variance = variance / (rows * cols);
int *p_image_filter, *p_variance;
cudaMalloc((void **)&p_image_filter, sizeof(int) * rows * cols);
cudaMalloc((void **)&p_variance, sizeof(int));
cudaMemcpy(p_variance, &variance, sizeof(int), cudaMemcpyHostToDevice);
get_global_variance<<<rows, cols>>>(p_local_variance, p_local_mean, p_image, p_image_filter, p_variance);
cudaMemcpy(image_filter, p_image_filter, sizeof(int) * rows * cols, cudaMemcpyDeviceToHost);
strcpy(name, "noise_removed.png");
image_save(image_filter, rows, cols, name);
cudaDeviceSynchronize() ;
// 5) Apply horizontal sobel filter for edge detection
rows += 1; /* Investigate this further */
cols += 1;
image_padding<<<rows, cols>>>(p_image_filter, p_image_pad);
cudaDeviceSynchronize() ;
rows -= 1;
cols -= 1;
cudaFree(p_local_variance);
cudaFree(p_local_mean);
cudaFree(p_image);
int image_final[rows*cols] ;
int *p_image_final;
int *p_sobel;
cudaMalloc((void **)&p_image_final, sizeof(int)*rows*cols);
cudaMalloc((void **)&p_sobel, sizeof(int) * 9);
cudaMemcpy(p_sobel, sobel, sizeof(int) * 9, cudaMemcpyHostToDevice);
sobel_horizontal<<<rows+1, cols+1>>>(p_image_final, p_image_pad, p_sobel);
cudaMemcpy(image_final, p_image_final, sizeof(int)*rows*cols, cudaMemcpyDeviceToHost);
err(checkers++) ;
printf("\n\nFunction 5.2 , %d \n\n" , checkers);
strcpy(name, "final_image.png");
image_save(image_final, rows, cols, name);
printf(" Processing complete , open final_image.png to see results \n");
cudaFree(p_sobel);
cudaFree(p_image_pad);
cudaFree(p_image_final);
return 0 ;
}
|
840e8745ceac1d2061e0ffaa103a9d54a4096e8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "WFObjectToString.h"
#include "UniformGridSortBuilderTest.h"
#include "GraphTest.h"
#include "CollisionTest.h"
#include "ShapeVariationTest.h"
#include "RNGTest.h"
#include "WiggleTest.h"
#include <thrust/detail/config.h>
int main()
{
#if 0//THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
std::cerr << "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n";
return 1;
}
#endif
RNGTest rngTest;
int rng_test_result = rngTest.testAll();
//graphTest.testAll(1000);
if (rng_test_result != 0)
{
std::cerr << "Random number generator test failed!\n";
return rng_test_result;
}
else
{
std::cerr << "Random number generator test passed.\n";
}
UniformGridSortBuildTest uniformGridTest;
int ugrid_test_result = uniformGridTest.testAll("../scenes/church/church.obj", 32, 16, 24);
if (ugrid_test_result != 0)
{
std::cerr << "Uniform grid construction test failed!\n";
return ugrid_test_result;
}
else
{
std::cerr << "Uniform grid construction test passed.\n";
}
GraphTest graphTest;
int graph_test_result = graphTest.testAll(1000);
//graphTest.testAll(1000);
if (graph_test_result != 0)
{
std::cerr << "Graph construction test failed!\n";
return graph_test_result;
}
else
{
std::cerr << "Graph construction test passed.\n";
}
CollisionTest collTest;
int coll_test_result = collTest.testAll("../scenes/castle/castle.obj");
if (coll_test_result != 0)
{
std::cerr << "Collision detection test failed!\n";
return coll_test_result;
}
else
{
std::cerr << "Collision detection test passed. \n";
}
std::cerr << "---------------------------------------------------------------------\n";
const char* obj2strTestFile = "../scenes/church/church.obj";
std::cerr << obj2strTestFile << " converted to \n"
<< WFObjectToString(obj2strTestFile) << "\n";
const char* variationFile1 = "../scenes/test_church/c19.obj";
const char* variationFile2 = "../scenes/test_church/c28.obj";
//const char* variationFile1 = "../scenes/test_skyscraper/v01.obj";
//const char* variationFile2 = "../scenes/test_skyscraper/v02.obj";
//const char* variationFile1 = "../scenes/test_sand_castle/v01.obj";
//const char* variationFile2 = "../scenes/test_sand_castle/v02.obj";
//const char* variationFile1 = "../scenes/test_playground/v001.obj";
//const char* variationFile2 = "../scenes/test_playground/v002.obj";
//const char* variationFile1 = "../scenes/moon_base/variant_01.obj";
//const char* variationFile2 = "../scenes/moon_base/variant_02.obj";
ShapeVariationTest variationTest;
int var_test_result = variationTest.testAll(variationFile1, variationFile2);
if (var_test_result != 0)
{
std::cerr << "Shape variation test failed!\n";
return var_test_result;
}
else
{
std::cerr << "Shape variation test passed. \n";
}
//const char* wiggleFile1 = "../scenes/wiggle_test/v001.obj";
//const char* wiggleFile2 = "../scenes/wiggle_test/v002.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v001_v002_2_v002_2_v001_v002_3_v002_3_2_v001_v002_3_v002_3_v002_5_1.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v001_v001_v002_2_3_v001_v002_3_v002_3_3_v001_v002_2_v002_2_v001_v002_3_v002_3_3_3.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v002.obj";
const char* wiggleFile1 = "../scenes/wiggle_test/c19.obj";
const char* wiggleFile2 = "../scenes/wiggle_test/c28.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v_1_4_11.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/c19_c19_c28_12_1.obj";
const char* wiggleFile3 = "../scenes/wiggle_test/v_1_4_12.obj";
//const char* wiggleFile1 = "../scenes/skyscraper/v01.obj";
//const char* wiggleFile2 = "../scenes/skyscraper/v02.obj";
//const char* wiggleFile3 = "../scenes/skyscraper/v01_v02_5.obj";
//const char* wiggleFile1 = "../scenes/sand_castle/v01.obj";
//const char* wiggleFile2 = "../scenes/sand_castle/v02.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v01_v02_3.obj";
const char* wiggleOutFile = "../scenes/wiggle_test/fixed.obj";
WiggleTest wiggleTest;
int wiggle_test_result = wiggleTest.testAll(wiggleFile1, wiggleFile2, wiggleFile3, wiggleOutFile);
if (wiggle_test_result != 0)
{
std::cerr << "Wiggle test failed!\n";
if (wiggle_test_result == 1)
{
std::cerr << "Invalid repair target - does not conform grammar.\n";
}
else if (wiggle_test_result == 2)
{
std::cerr << "Object repair attempt failed.\n";
}
return wiggle_test_result;
}
else
{
std::cerr << "Object repair attempt succeeded.\n";
std::cerr << "Wrote " << wiggleOutFile << "\n";
}
#if 0//THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
//hipDeviceReset must be called before exiting in order for profiling and
//tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
std::cerr << "hipDeviceReset failed!\n";
return 1;
}
#endif
return 0;
}
|
840e8745ceac1d2061e0ffaa103a9d54a4096e8e.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "WFObjectToString.h"
#include "UniformGridSortBuilderTest.h"
#include "GraphTest.h"
#include "CollisionTest.h"
#include "ShapeVariationTest.h"
#include "RNGTest.h"
#include "WiggleTest.h"
#include <thrust/detail/config.h>
int main()
{
#if 0//THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n";
return 1;
}
#endif
RNGTest rngTest;
int rng_test_result = rngTest.testAll();
//graphTest.testAll(1000);
if (rng_test_result != 0)
{
std::cerr << "Random number generator test failed!\n";
return rng_test_result;
}
else
{
std::cerr << "Random number generator test passed.\n";
}
UniformGridSortBuildTest uniformGridTest;
int ugrid_test_result = uniformGridTest.testAll("../scenes/church/church.obj", 32, 16, 24);
if (ugrid_test_result != 0)
{
std::cerr << "Uniform grid construction test failed!\n";
return ugrid_test_result;
}
else
{
std::cerr << "Uniform grid construction test passed.\n";
}
GraphTest graphTest;
int graph_test_result = graphTest.testAll(1000);
//graphTest.testAll(1000);
if (graph_test_result != 0)
{
std::cerr << "Graph construction test failed!\n";
return graph_test_result;
}
else
{
std::cerr << "Graph construction test passed.\n";
}
CollisionTest collTest;
int coll_test_result = collTest.testAll("../scenes/castle/castle.obj");
if (coll_test_result != 0)
{
std::cerr << "Collision detection test failed!\n";
return coll_test_result;
}
else
{
std::cerr << "Collision detection test passed. \n";
}
std::cerr << "---------------------------------------------------------------------\n";
const char* obj2strTestFile = "../scenes/church/church.obj";
std::cerr << obj2strTestFile << " converted to \n"
<< WFObjectToString(obj2strTestFile) << "\n";
const char* variationFile1 = "../scenes/test_church/c19.obj";
const char* variationFile2 = "../scenes/test_church/c28.obj";
//const char* variationFile1 = "../scenes/test_skyscraper/v01.obj";
//const char* variationFile2 = "../scenes/test_skyscraper/v02.obj";
//const char* variationFile1 = "../scenes/test_sand_castle/v01.obj";
//const char* variationFile2 = "../scenes/test_sand_castle/v02.obj";
//const char* variationFile1 = "../scenes/test_playground/v001.obj";
//const char* variationFile2 = "../scenes/test_playground/v002.obj";
//const char* variationFile1 = "../scenes/moon_base/variant_01.obj";
//const char* variationFile2 = "../scenes/moon_base/variant_02.obj";
ShapeVariationTest variationTest;
int var_test_result = variationTest.testAll(variationFile1, variationFile2);
if (var_test_result != 0)
{
std::cerr << "Shape variation test failed!\n";
return var_test_result;
}
else
{
std::cerr << "Shape variation test passed. \n";
}
//const char* wiggleFile1 = "../scenes/wiggle_test/v001.obj";
//const char* wiggleFile2 = "../scenes/wiggle_test/v002.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v001_v002_2_v002_2_v001_v002_3_v002_3_2_v001_v002_3_v002_3_v002_5_1.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v001_v001_v002_2_3_v001_v002_3_v002_3_3_v001_v002_2_v002_2_v001_v002_3_v002_3_3_3.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v002.obj";
const char* wiggleFile1 = "../scenes/wiggle_test/c19.obj";
const char* wiggleFile2 = "../scenes/wiggle_test/c28.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v_1_4_11.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/c19_c19_c28_12_1.obj";
const char* wiggleFile3 = "../scenes/wiggle_test/v_1_4_12.obj";
//const char* wiggleFile1 = "../scenes/skyscraper/v01.obj";
//const char* wiggleFile2 = "../scenes/skyscraper/v02.obj";
//const char* wiggleFile3 = "../scenes/skyscraper/v01_v02_5.obj";
//const char* wiggleFile1 = "../scenes/sand_castle/v01.obj";
//const char* wiggleFile2 = "../scenes/sand_castle/v02.obj";
//const char* wiggleFile3 = "../scenes/wiggle_test/v01_v02_3.obj";
const char* wiggleOutFile = "../scenes/wiggle_test/fixed.obj";
WiggleTest wiggleTest;
int wiggle_test_result = wiggleTest.testAll(wiggleFile1, wiggleFile2, wiggleFile3, wiggleOutFile);
if (wiggle_test_result != 0)
{
std::cerr << "Wiggle test failed!\n";
if (wiggle_test_result == 1)
{
std::cerr << "Invalid repair target - does not conform grammar.\n";
}
else if (wiggle_test_result == 2)
{
std::cerr << "Object repair attempt failed.\n";
}
return wiggle_test_result;
}
else
{
std::cerr << "Object repair attempt succeeded.\n";
std::cerr << "Wrote " << wiggleOutFile << "\n";
}
#if 0//THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
//cudaDeviceReset must be called before exiting in order for profiling and
//tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
std::cerr << "cudaDeviceReset failed!\n";
return 1;
}
#endif
return 0;
}
|
c9d4371687f3b8ce30bb73993d98928e052d6cd0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <aggregation/selectors/size8_selector.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/unique.h> //unique
#include <thrust/remove.h> //remove
#include <thrust/transform_scan.h> //transform_inclusive_scan
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <iostream>
#include <strided_reduction.h>
#include <aggregation/selectors/selector_kernels.h>
namespace amgx
{
namespace strided_reduction
{
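// Collects the n_blocks per-block partial results in out_d using a single 32-thread block and
// writes the reduced result directly into out_host (expected to be pinned host memory), so no
// separate device-to-host copy is needed.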
template<int STRIDE, class scalar_t, class OP>
void count_block_results_pinned_memory(scalar_t *out_host, const int n_blocks, scalar_t *out_d, const OP &op = OP(), hipStream_t stream = 0)
{
hipLaunchKernelGGL(( strided_reduction_collect_partials<scalar_t, STRIDE, 32, OP>) , dim3(1), dim3(32), 0, stream, out_host, out_d, n_blocks);
cudaCheckError();
}
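// STRIDE == 1 convenience wrapper: lazily allocates a small static pinned buffer and a throttle
// event, launches the collection kernel into slot i % buffers, and returns the accumulated total
// only after synchronizing on the event (-1 on calls where the buffer is not yet complete).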
template<class scalar_t, class OP>
scalar_t count_block_results_pinned_memory(const int a, const int i, const int n_blocks, scalar_t *out_d, const OP &op = OP(), hipStream_t stream = 0) //STRIDE=1 case
{
static scalar_t *ret = 0;
static hipEvent_t throttle_event = 0;
const int buffers = 1;
if (ret == 0)
{
thrust::global_thread_handle::hipHostMalloc((void **)&ret, buffers * sizeof(scalar_t));
ret[0] = 0;
hipEventCreateWithFlags(&throttle_event, hipEventDisableTiming);
}
int ib = i % buffers;
count_block_results_pinned_memory<1, scalar_t, OP>(ret + ib, n_blocks, out_d, op, stream);
if (ib == buffers - 1)
{
hipEventRecord(throttle_event);
hipEventSynchronize(throttle_event);
scalar_t tot = 0;
for (int j = 0; j < buffers; j++)
{
tot += ret[j];
}
return tot + buffers - 1;
}
else
{
return -1;
}
}
}
void analyze_coloring(device_vector_alloc<int> aggregates_d, device_vector_alloc<int> colors_d);
namespace aggregation
{
namespace size8_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// Reads the weight from edge_weights array
template <typename IndexType>
__global__
void findStrongestNeighbourBlockDiaCsr_StoreWeight_2(const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour, IndexType *partner_index, float *weight_strongest_neighbour)
{
float weight;
int jcol, jmin, jmax;
int partner0, partner1, partner2;
int agg_jcol;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
float max_weight_unaggregated = 0.;
float max_weight_aggregated = 0.;
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
if (aggregated[tid] == -1) // Unaggregated row
{
partner0 = partner_index[tid];
partner1 = partner_index[num_block_rows + tid];
partner2 = partner_index[2 * num_block_rows + tid];
jmin = row_offsets[tid];
jmax = row_offsets[tid + 1];
for (int j = jmin; j < jmax; j++)
{
jcol = column_indices[j];
if (jcol == tid || jcol >= num_block_rows) { continue; }
weight = edge_weights[j];
agg_jcol = aggregated[jcol];
if (jcol != partner0 && jcol != partner1 && jcol != partner2)
{
if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
{
weight_strongest_neighbour[tid] = -max_weight_aggregated;
strongest_neighbour[tid] = aggregates[strongest_aggregated];
}
else if (strongest_unaggregated != -1)
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
}
}
// agreeOnProposal_2 kernel for block_dia_csr_matrix format
// Partners within the same aggregate compare the weights of their strongest neighbours and adopt the strongest proposal
template <typename IndexType>
__global__
void agreeOnProposal_2(const IndexType *row_offsets, const IndexType *column_indices,
IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
int partner[3];
float weight[3];
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
int strongest_partner = -1;
float my_weight = 0.;
if (aggregated[tid] == -1)
{
my_weight = weight_strongest_neighbour[tid];
#pragma unroll
for (int m = 0; m < 3; m++)
{
partner[m] = partner_index[tid + m * num_block_rows];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
weight[m] = weight_strongest_neighbour[partner[m]];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
//if (weight[m] > my_weight && weight[m] > 0.) // there is a partner that has an unaggregated neighbour
if (weight[m] > my_weight)
{
if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
{
strongest_partner = m;
my_weight = weight[m];
}
}
else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
{
if (my_weight < 0.)
{
strongest_partner = m;
my_weight = weight[m];
}
}
}
if (my_weight < 0.) // all neighbours of the vertices in this aggregate are already aggregated, so merge into another aggregate
{
if (!deterministic)
{
aggregated[tid] = 1;
aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
}
}
else if (strongest_partner != -1) // store my partner's pick
{
strongest_neighbour[tid] = strongest_neighbour[partner[strongest_partner]];
}
}
}
}
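// Deterministic variant of agreeOnProposal_2: each thread writes its decision to
// strongest_neighbour_out instead of updating strongest_neighbour in place, so the result does not
// depend on the order in which threads read and write the shared array.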
template <typename IndexType>
__global__
void agreeOnProposal_2_deterministic(int *strongest_neighbour_out, const IndexType *row_offsets, const IndexType *column_indices,
IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
int partner[3];
float weight[3];
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
int strongest_partner = -1;
float my_weight = 0.;
//copy here to avoid redundant copies before launching the kernel
int new_strongest_neighbour_out = strongest_neighbour[tid];
if (aggregated[tid] == -1)
{
my_weight = weight_strongest_neighbour[tid];
#pragma unroll
for (int m = 0; m < 3; m++)
{
partner[m] = partner_index[tid + m * num_block_rows];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
weight[m] = weight_strongest_neighbour[partner[m]];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
if (weight[m] > my_weight)
{
if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
{
strongest_partner = m;
my_weight = weight[m];
}
}
else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
{
if (my_weight < 0.)
{
strongest_partner = m;
my_weight = weight[m];
}
}
}
if (my_weight < 0.) // all neighbours of the vertices in this aggregate are already aggregated, so merge into another aggregate
{
if (!deterministic)
{
aggregated[tid] = 1;
aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
}
}
else if (strongest_partner != -1) // store my partner's pick
{
new_strongest_neighbour_out = strongest_neighbour[partner[strongest_partner]];
}
}
//copy here to avoid redundant copies before launching the kernel
strongest_neighbour_out[tid] = new_strongest_neighbour_out;
}
}
// Kernel that checks if perfect matches exist
template <typename IndexType>
__global__
void matchAggregatesSize4(IndexType *aggregates, IndexType *aggregated, IndexType *strongest_neighbour, IndexType *partner_index, const IndexType num_rows)
{
int potential_match, potential_match_neighbour, my_aggregate;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_rows; tid += blockDim.x * gridDim.x)
{
if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = strongest_neighbour[potential_match];
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
partner_index[tid + num_rows] = potential_match;
partner_index[tid + 2 * num_rows] = partner_index[potential_match];
}
}
}
}
}
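// Any vertex whose second or third partner slot is still -1 after matching becomes its own partner.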
template <typename IndexType>
__global__
void assignUnassignedVertices_2(IndexType *partner_index, const IndexType num_rows)
{
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_rows; tid += blockDim.x * gridDim.x)
{
if (partner_index[num_rows + tid] == -1) // Unaggregated row
{
partner_index[num_rows + tid] = tid;
}
if (partner_index[2 * num_rows + tid] == -1) // Unaggregated row
{
partner_index[2 * num_rows + tid] = tid;
}
}
}
// -----------------
// Methods
// ----------------
// Constructor
template<class T_Config>
Size8SelectorBase<T_Config>::Size8SelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default");
max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope);
numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope);
m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope);
weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope);
}
// setAggregates for block_dia_csr_matrix_h format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_h &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
FatalError("Size8 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
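// Computes a strength-of-connection weight for every stored block entry a_ij using the
// (component, component) element of each block: 0.5 * (|a_ij| + |a_ji|) / max(|a_ii|, |a_jj|),
// zeroed for diagonal entries and for columns outside the owned range. The while (utils::any(...))
// loop keeps whole warps active while any lane still has work, so the search for the transpose
// entry a_ji stays warp-uniform.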
template <int NUM_COLS, typename IndexType, typename ValueType>
__global__ //__launch_bounds__(256,2)
void computeEdgeWeightsBlockDiaCsr_V2_1(
const IndexType *row_offsets,
//const int* __myrestrict row_offsets,
const IndexType *row_indices,
const IndexType *column_indices,
const IndexType *dia_values,
const ValueType *nonzero_values,
//const ValueType* __restrict nonzero_values,
const IndexType num_nonzero_blocks,
float *str_edge_weights, float *rand_edge_weights, int num_owned, int bsize, int component)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int i, j;
int bsize_sq = bsize * bsize;
float kvalue;
int matrix_weight_entry = component * bsize + component;
bool valid_tid;
while (utils::any(valid_tid = tid < num_nonzero_blocks))
{
i = -1;
double d1, d2, w1;
if (valid_tid)
{
if ( rand_edge_weights != NULL )
{
rand_edge_weights[tid] = random_weight(i, j, num_owned);
}
i = row_indices[tid];
j = column_indices[tid];
d1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + i) * bsize_sq + matrix_weight_entry]));
d2 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + j) * bsize_sq + matrix_weight_entry]));
}
const bool valid_j = valid_tid && i != j && j < num_owned;
int ki = -1; //my transpose index, initialized to not found
//int diag_j = -1; //j diagonal index
if (!utils::any(valid_j))
{
continue;
}
int kmin = 0, kmax = 0;
if (valid_j)
{
kmin = __cachingLoad(&row_offsets[j ]);
kmax = __cachingLoad(&row_offsets[j + 1]);
}
for ( int k = kmin ; k < kmax ; ++k )
{
const int idx = __load_nc(column_indices + k);
if (idx == i)
{
ki = k; //find the transpose ji
}
}
kvalue = 0.0f;
if (ki > -1)
{
kvalue = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[ki * bsize_sq + matrix_weight_entry]));
}
if (valid_tid)
{
w1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[tid * bsize_sq + matrix_weight_entry]));
str_edge_weights[tid] = 0.5 * (w1 + kvalue) / ( (float) max(d1, d2) ) * valid_j;
}
tid += gridDim.x * blockDim.x;
}
}
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
template<class T, class M, class V>
struct async_size8_task : public task_cuda
{
T *size8;
M *A;
V *aggregates;
V *aggregates_global;
int *num_aggregates;
typedef typename T::IndexType IndexType;
void run()
{
const IndexType nnz_per_row = (*A).get_num_nz() / (*A).get_num_rows();
if (0)
{
}
else if (nnz_per_row > 2)
{
size8->template setAggregates_common_sqblock_avg_specialized<4>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
else if (nnz_per_row > 1)
{
size8->template setAggregates_common_sqblock_avg_specialized<4>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
else
{
size8->template setAggregates_common_sqblock_avg_specialized<2>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
}
};
template<class T, class M, class V>
async_size8_task<T, M, V> *make_async_size8_task(T *size8, M &A, V &aggregates, V &aggregates_global, int &num_aggregates)
{
async_size8_task<T, M, V> *ret = new async_size8_task<T, M, V>;
ret->size8 = size8;
ret->A = &A;
ret->aggregates = &aggregates;
ret->aggregates_global = &aggregates_global;
ret->num_aggregates = &num_aggregates;
static task_chain_cuda_streamset *ss = new task_chain_cuda_streamset(1);
task_chain_cuda *cr = new task_chain_cuda(ss);
cr->append(ret, asyncmanager::singleton()->main_thread_queue(2));
return ret;
}
#endif
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_d &A,
typename Matrix_d::IVector &aggregates,
typename Matrix_d::IVector &aggregates_global,
int &num_aggregates)
{
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
bool task = false;
bool push = false;
if (task)
{
task_cuda *t = make_async_size8_task(this, A, aggregates, aggregates_global, num_aggregates);
enqueue_async_get_receipt(asyncmanager::singleton()->global_parallel_queue, t)->wait();
return;
}
hipStream_t stream_old;
static hipStream_t stream = 0;
if (push)
{
stream_old = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
hipStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]);
if (stream == 0) { hipStreamCreate(&stream); }
thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream;
}
#endif
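// Dispatch on the average number of nonzero blocks per row; denser matrices use the AVG_NNZ = 4
// specialization, very sparse ones AVG_NNZ = 2.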
const IndexType nnz_per_row = A.get_num_nz() / A.get_num_rows();
if (0)
{
}
else if (nnz_per_row > 2)
{
setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
}
else if (nnz_per_row > 1)
{
setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
}
else
{
setAggregates_common_sqblock_avg_specialized<2>(A, aggregates, aggregates_global, num_aggregates);
}
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
if (push)
{
//hipStreamSynchronize(stream);
//thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream_old;
}
#endif
}
// setAggregates for block_dia_csr_matrix_d format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template<int AVG_NNZ>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: setAggregates_common_sqblock_avg_specialized(const Matrix_d &A,
typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
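// Builds aggregates of up to 8 vertices in three matching passes: vertices are paired into size-2
// aggregates, pairs are matched into size-4 aggregates, and those are matched once more into size-8
// aggregates; vertices that remain unmatched are finally merged into existing aggregates.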
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
hipStream_t stream = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
#else
hipStream_t stream = thrust::global_thread_handle::get_stream();
#endif
const IndexType num_block_rows = A.get_num_rows();
const IndexType num_nonzero_blocks = A.get_num_nz();
if (!A.is_matrix_singleGPU())
{
aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
}
else
{
aggregates.resize(num_block_rows);
}
// Initially, put each vertex in its own aggregate
thrust::sequence(aggregates.begin(), aggregates.begin() + num_block_rows);
cudaCheckError();
IndexType *aggregates_ptr = aggregates.raw();
// Create row_indices array
IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
typename Matrix_d::IVector row_indices(total_nz);
cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_row_indices_ptr = row_indices.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
//const ValueType *A_dia_val_ptr = thrust::raw_pointer_cast(&A.values[A.get_block_size()*A.diagOffset()]);
const ValueType *A_nonzero_values_ptr = A.values.raw();
typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
typename Matrix_d::IVector partner_index(3 * num_block_rows, -1);
typename Matrix_d::IVector strongest_neighbour_tmp(num_block_rows);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
IndexType *partner_index_ptr = partner_index.raw();
const int threads_per_block = 256;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1);
int numUnassigned = num_block_rows;
int numUnassigned_previous = numUnassigned;
Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks + 8); //8-padded
float *edge_weights_ptr = edge_weights.raw();
float *rand_edge_weights_ptr = NULL;
const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
// Compute the edge weights
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
int avoid_thrust_count = 1;//0;
int newFindStrongest = 1;//0;
int newWeights = 1;//0;
#else
int avoid_thrust_count = 0;//0;
int newFindStrongest = 0;//0;
int newWeights = 0;//0;
#endif
int usenumassignedassumption = false;
if (newWeights == 0)
{
hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( computeEdgeWeightsBlockDiaCsr_V2) , dim3(num_blocks_V2), dim3(threads_per_block), 0, stream,
A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula);
cudaCheckError();
}
else
{
hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType>) , dim3(num_blocks_V2), dim3(threads_per_block), 0, stream,
A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component);
cudaCheckError();
}
// -------------------------------------------------
// First create aggregates of size 2
// -------------------------------------------------
int icount = 0;
const int num_blocks_1024 = min( 13 * 2, (num_block_rows - 1) / 1024 + 1);
device_vector_alloc<int> sets_per_block_t(num_blocks_1024);
int *sets_per_block = thrust::raw_pointer_cast(sets_per_block_t.data());
cudaCheckError();
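// Handshake matching for the size-2 pass: every unmatched vertex proposes its strongest neighbour,
// then perfect (mutual) proposals become pairs. The loop stops when everything is matched, progress
// stalls, the unassigned fraction drops below numUnassigned_tol, or max_iterations is exceeded.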
do
{
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream,
A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
}
else
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream,
A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
}
}
else
{
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_NoMerge) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, partner_index_ptr, strongest_neighbour_ptr, this->deterministic);
}
cudaCheckError();
// Look for perfect matches
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
hipLaunchKernelGGL(( matchEdges) , dim3(num_blocks), dim3(threads_per_block), 0, stream, num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr);
cudaCheckError();
numUnassigned = (int)thrust::count(partner_index.begin(), partner_index.begin() + num_block_rows, -1);
cudaCheckError();
}
else
{
hipLaunchKernelGGL(( my_MatchEdges) , dim3(num_blocks_1024), dim3(1024), 0, stream, num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr, sets_per_block);
cudaCheckError();
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(0, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned));
hipLaunchKernelGGL(( assignUnassignedVertices) , dim3(num_blocks), dim3(threads_per_block), 0, stream, partner_index_ptr, num_block_rows);
cudaCheckError();
// -------------------------------------------------
// Merge aggregates to create aggregates of size 4
// -------------------------------------------------
Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > weight_strongest_neighbour(num_block_rows, -1);
float *weight_strongest_neighbour_ptr = weight_strongest_neighbour.raw();
// At this point, partner_index contains either your own index or your neighbour's index, depending on whether you're matched or not
// aggregates contains the largest vertex index of the vertices in the aggregate
typename Matrix_d::IVector aggregated(num_block_rows, -1);
IndexType *aggregated_ptr = aggregated.raw();
// now used as flag to check if aggregated or not
icount = 0;
numUnassigned = num_block_rows;
numUnassigned_previous = numUnassigned;
do
{
// Each vertex stores in strongest_neighbour the aggregates number of strongest neighbour and the weight of connection
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
else
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
}
else
{
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_StoreWeight) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic);
}
cudaCheckError();
// Vertices in the same aggregate agree on which aggregate to propose to, and both store the aggregate number they want to match with
hipLaunchKernelGGL(( agreeOnProposal) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr);
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
hipLaunchKernelGGL(( matchAggregatesSize4 <IndexType>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows);
numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
hipLaunchKernelGGL(( my_matchAggregatesSize4) , dim3(num_blocks_1024), dim3(1024), 0, stream, aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows, sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(1, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous) );
hipLaunchKernelGGL(( assignUnassignedVertices_2) , dim3(num_blocks), dim3(threads_per_block), 0, stream, partner_index_ptr, num_block_rows);
cudaCheckError();
// -------------------------------------------------
// Merge aggregates to create aggregates of size 8
// -------------------------------------------------
thrust::fill(aggregated.begin(), aggregated.end(), -1);
cudaCheckError();
thrust::fill(weight_strongest_neighbour.begin(), weight_strongest_neighbour.end(), -1.);
cudaCheckError();
icount = 0;
numUnassigned = num_block_rows;
numUnassigned_previous = numUnassigned;
do
{
// Each vertex stores in strongest_neighbour the aggregates number of strongest neighbour and the weight of connection
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
else
{
hipFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
}
else
{
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_StoreWeight_2) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr);
}
cudaCheckError();
// Vertices in the same aggregate agree on which aggregate to propose to, and both store the aggregate number they want to match with
if (!this->deterministic)
{
hipLaunchKernelGGL(( agreeOnProposal_2) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
}
else
{
//strongest_neighbour_tmp = strongest_neighbour; // copied that directly in the kernel
hipLaunchKernelGGL(( agreeOnProposal_2_deterministic) , dim3(num_blocks), dim3(threads_per_block), 0, stream,
strongest_neighbour_tmp.raw(),
A_row_offsets_ptr,
A_column_indices_ptr, num_block_rows,
aggregated_ptr, strongest_neighbour_ptr,
weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
strongest_neighbour_tmp.swap(strongest_neighbour);
strongest_neighbour_ptr = strongest_neighbour.raw(); //re-saving the correct pointer..
}
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
hipLaunchKernelGGL(( matchAggregates <IndexType>) , dim3(num_blocks), dim3(threads_per_block), 0, stream, aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows);
numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
hipLaunchKernelGGL(( my_matchAggregates) , dim3(num_blocks_1024), dim3(1024), 0, stream, aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows, sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(2, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned) );
// Merge remaining vertices with current aggregates
int local_iter = 0;
if (!this->deterministic)
{
while (numUnassigned != 0)
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, (IndexType *) NULL, local_iter > 1);
cudaCheckError();
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
cudaCheckError();
local_iter++;
}
}
else
{
typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
while (numUnassigned != 0)
{
// allow singletons only from the 2nd local iteration
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr) , dim3(num_blocks), dim3(threads_per_block), 0, stream, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, aggregates_candidate.raw(), local_iter > 1);
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
hipLaunchKernelGGL(( joinExistingAggregates) , dim3(num_blocks), dim3(threads_per_block), 0, stream, num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw());
numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
hipLaunchKernelGGL(( my_joinExistingAggregates) , dim3(num_blocks_1024), dim3(1024), 0, stream, num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw(), sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(3, local_iter, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
local_iter++;
}
aggregates_candidate.resize(0);
}
this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
//analyze_coloring(aggregates, A.getMatrixColoring().getRowColors());
}
template <class T_Config>
void Size8SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
if (A.get_block_dimx() == A.get_block_dimy())
{
setAggregates_common_sqblock( A, aggregates, aggregates_global, num_aggregates );
}
else
{
FatalError("Unsupported block size for Size8Selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
}
// -------------------------
// Explicit instantiations
// -------------------------
#define AMGX_CASE_LINE(CASE) template class Size8SelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Size8Selector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
|
c9d4371687f3b8ce30bb73993d98928e052d6cd0.cu
|
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <aggregation/selectors/size8_selector.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/unique.h> //unique
#include <thrust/remove.h> //remove
#include <thrust/transform_scan.h> //transform_inclusive_scan
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <iostream>
#include <strided_reduction.h>
#include <aggregation/selectors/selector_kernels.h>
namespace amgx
{
namespace strided_reduction
{
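// Collects the n_blocks per-block partial results in out_d using a single 32-thread block and
// writes the reduced result directly into out_host (expected to be pinned host memory), so no
// separate device-to-host copy is needed.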
template<int STRIDE, class scalar_t, class OP>
void count_block_results_pinned_memory(scalar_t *out_host, const int n_blocks, scalar_t *out_d, const OP &op = OP(), cudaStream_t stream = 0)
{
strided_reduction_collect_partials<scalar_t, STRIDE, 32, OP> <<< 1, 32, 0, stream>>>(out_host, out_d, n_blocks);
cudaCheckError();
}
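// STRIDE == 1 convenience wrapper: lazily allocates a small static pinned buffer and a throttle
// event, launches the collection kernel into slot i % buffers, and returns the accumulated total
// only after synchronizing on the event (-1 on calls where the buffer is not yet complete).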
template<class scalar_t, class OP>
scalar_t count_block_results_pinned_memory(const int a, const int i, const int n_blocks, scalar_t *out_d, const OP &op = OP(), cudaStream_t stream = 0) //STRIDE=1 case
{
static scalar_t *ret = 0;
static cudaEvent_t throttle_event = 0;
const int buffers = 1;
if (ret == 0)
{
thrust::global_thread_handle::cudaMallocHost((void **)&ret, buffers * sizeof(scalar_t));
ret[0] = 0;
cudaEventCreateWithFlags(&throttle_event, cudaEventDisableTiming);
}
int ib = i % buffers;
count_block_results_pinned_memory<1, scalar_t, OP>(ret + ib, n_blocks, out_d, op, stream);
if (ib == buffers - 1)
{
cudaEventRecord(throttle_event);
cudaEventSynchronize(throttle_event);
scalar_t tot = 0;
for (int j = 0; j < buffers; j++)
{
tot += ret[j];
}
return tot + buffers - 1;
}
else
{
return -1;
}
}
}
void analyze_coloring(device_vector_alloc<int> aggregates_d, device_vector_alloc<int> colors_d);
namespace aggregation
{
namespace size8_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// Reads the weight from edge_weights array
template <typename IndexType>
__global__
void findStrongestNeighbourBlockDiaCsr_StoreWeight_2(const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour, IndexType *partner_index, float *weight_strongest_neighbour)
{
float weight;
int jcol, jmin, jmax;
int partner0, partner1, partner2;
int agg_jcol;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
float max_weight_unaggregated = 0.;
float max_weight_aggregated = 0.;
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
if (aggregated[tid] == -1) // Unaggregated row
{
partner0 = partner_index[tid];
partner1 = partner_index[num_block_rows + tid];
partner2 = partner_index[2 * num_block_rows + tid];
jmin = row_offsets[tid];
jmax = row_offsets[tid + 1];
for (int j = jmin; j < jmax; j++)
{
jcol = column_indices[j];
if (jcol == tid || jcol >= num_block_rows) { continue; }
weight = edge_weights[j];
agg_jcol = aggregated[jcol];
if (jcol != partner0 && jcol != partner1 && jcol != partner2)
{
if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
{
weight_strongest_neighbour[tid] = -max_weight_aggregated;
strongest_neighbour[tid] = aggregates[strongest_aggregated];
}
else if (strongest_unaggregated != -1)
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
}
}
// agreeOnProposal_2 kernel for block_dia_csr_matrix format
// Partners within the same aggregate compare the weights of their strongest neighbours and adopt the strongest proposal
template <typename IndexType>
__global__
void agreeOnProposal_2(const IndexType *row_offsets, const IndexType *column_indices,
IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
int partner[3];
float weight[3];
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
int strongest_partner = -1;
float my_weight = 0.;
if (aggregated[tid] == -1)
{
my_weight = weight_strongest_neighbour[tid];
#pragma unroll
for (int m = 0; m < 3; m++)
{
partner[m] = partner_index[tid + m * num_block_rows];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
weight[m] = weight_strongest_neighbour[partner[m]];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
//if (weight[m] > my_weight && weight[m] > 0.) // there is a partner that has an unaggregated neighbour
if (weight[m] > my_weight)
{
if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
{
strongest_partner = m;
my_weight = weight[m];
}
}
else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
{
if (my_weight < 0.)
{
strongest_partner = m;
my_weight = weight[m];
}
}
}
if (my_weight < 0.) // all neighbours of the vertices in this aggregate are already aggregated, so merge into another aggregate
{
if (!deterministic)
{
aggregated[tid] = 1;
aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
}
}
else if (strongest_partner != -1) // store my partner's pick
{
strongest_neighbour[tid] = strongest_neighbour[partner[strongest_partner]];
}
}
}
}
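// Deterministic variant of agreeOnProposal_2: each thread writes its decision to
// strongest_neighbour_out instead of updating strongest_neighbour in place, so the result does not
// depend on the order in which threads read and write the shared array.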
template <typename IndexType>
__global__
void agreeOnProposal_2_deterministic(int *strongest_neighbour_out, const IndexType *row_offsets, const IndexType *column_indices,
IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
int partner[3];
float weight[3];
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
{
int strongest_partner = -1;
float my_weight = 0.;
//copy here to avoid redundant copies before launching the kernel
int new_strongest_neighbour_out = strongest_neighbour[tid];
if (aggregated[tid] == -1)
{
my_weight = weight_strongest_neighbour[tid];
#pragma unroll
for (int m = 0; m < 3; m++)
{
partner[m] = partner_index[tid + m * num_block_rows];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
weight[m] = weight_strongest_neighbour[partner[m]];
}
#pragma unroll
for (int m = 0; m < 3; m++)
{
if (weight[m] > my_weight)
{
if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
{
strongest_partner = m;
my_weight = weight[m];
}
}
else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
{
if (my_weight < 0.)
{
strongest_partner = m;
my_weight = weight[m];
}
}
}
if (my_weight < 0.) // all neighbours of the vertices in this aggregate are already aggregated, so merge into another aggregate
{
if (!deterministic)
{
aggregated[tid] = 1;
aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
}
}
else if (strongest_partner != -1) // store my partner's pick
{
new_strongest_neighbour_out = strongest_neighbour[partner[strongest_partner]];
}
}
//copy here to avoid redundant copies before launching the kernel
strongest_neighbour_out[tid] = new_strongest_neighbour_out;
}
}
// Kernel that checks if perfect matches exist
template <typename IndexType>
__global__
void matchAggregatesSize4(IndexType *aggregates, IndexType *aggregated, IndexType *strongest_neighbour, IndexType *partner_index, const IndexType num_rows)
{
int potential_match, potential_match_neighbour, my_aggregate;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_rows; tid += blockDim.x * gridDim.x)
{
if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = strongest_neighbour[potential_match];
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
partner_index[tid + num_rows] = potential_match;
partner_index[tid + 2 * num_rows] = partner_index[potential_match];
}
}
}
}
}
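// Any vertex whose second or third partner slot is still -1 after matching becomes its own partner.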
template <typename IndexType>
__global__
void assignUnassignedVertices_2(IndexType *partner_index, const IndexType num_rows)
{
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_rows; tid += blockDim.x * gridDim.x)
{
if (partner_index[num_rows + tid] == -1) // Unaggregated row
{
partner_index[num_rows + tid] = tid;
}
if (partner_index[2 * num_rows + tid] == -1) // Unaggregated row
{
partner_index[2 * num_rows + tid] = tid;
}
}
}
// -----------------
// Methods
// ----------------
// Constructor
template<class T_Config>
Size8SelectorBase<T_Config>::Size8SelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default");
max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope);
numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope);
m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope);
weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope);
}
// setAggregates for block_dia_csr_matrix_h format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_h &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
FatalError("Size8 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
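// Computes a strength-of-connection weight for every stored block entry a_ij using the
// (component, component) element of each block: 0.5 * (|a_ij| + |a_ji|) / max(|a_ii|, |a_jj|),
// zeroed for diagonal entries and for columns outside the owned range. The while (utils::any(...))
// loop keeps whole warps active while any lane still has work, so the search for the transpose
// entry a_ji stays warp-uniform.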
template <int NUM_COLS, typename IndexType, typename ValueType>
__global__ //__launch_bounds__(256,2)
void computeEdgeWeightsBlockDiaCsr_V2_1(
const IndexType *row_offsets,
//const int* __myrestrict row_offsets,
const IndexType *row_indices,
const IndexType *column_indices,
const IndexType *dia_values,
const ValueType *nonzero_values,
//const ValueType* __restrict nonzero_values,
const IndexType num_nonzero_blocks,
float *str_edge_weights, float *rand_edge_weights, int num_owned, int bsize, int component)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int i, j;
int bsize_sq = bsize * bsize;
float kvalue;
int matrix_weight_entry = component * bsize + component;
bool valid_tid;
while (utils::any(valid_tid = tid < num_nonzero_blocks))
{
i = -1;
double d1, d2, w1;
if (valid_tid)
{
if ( rand_edge_weights != NULL )
{
rand_edge_weights[tid] = random_weight(i, j, num_owned);
}
i = row_indices[tid];
j = column_indices[tid];
d1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + i) * bsize_sq + matrix_weight_entry]));
d2 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + j) * bsize_sq + matrix_weight_entry]));
}
const bool valid_j = valid_tid && i != j && j < num_owned;
int ki = -1; //my transpose index, initialized to not found
//int diag_j = -1; //j diagonal index
if (!utils::any(valid_j))
{
continue;
}
int kmin = 0, kmax = 0;
if (valid_j)
{
kmin = __cachingLoad(&row_offsets[j ]);
kmax = __cachingLoad(&row_offsets[j + 1]);
}
for ( int k = kmin ; k < kmax ; ++k )
{
const int idx = __load_nc(column_indices + k);
if (idx == i)
{
ki = k; //find the transpose ji
}
}
kvalue = 0.0f;
if (ki > -1)
{
kvalue = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[ki * bsize_sq + matrix_weight_entry]));
}
if (valid_tid)
{
w1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[tid * bsize_sq + matrix_weight_entry]));
str_edge_weights[tid] = 0.5 * (w1 + kvalue) / ( (float) max(d1, d2) ) * valid_j;
}
tid += gridDim.x * blockDim.x;
}
}
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
template<class T, class M, class V>
struct async_size8_task : public task_cuda
{
T *size8;
M *A;
V *aggregates;
V *aggregates_global;
int *num_aggregates;
typedef typename T::IndexType IndexType;
void run()
{
const IndexType nnz_per_row = (*A).get_num_nz() / (*A).get_num_rows();
if (0)
{
}
else if (nnz_per_row > 2)
{
size8->template setAggregates_common_sqblock_avg_specialized<4>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
else if (nnz_per_row > 1)
{
size8->template setAggregates_common_sqblock_avg_specialized<4>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
else
{
size8->template setAggregates_common_sqblock_avg_specialized<2>(*A, *aggregates, *aggregates_global, *num_aggregates);
}
}
};
template<class T, class M, class V>
async_size8_task<T, M, V> *make_async_size8_task(T *size8, M &A, V &aggregates, V &aggregates_global, int &num_aggregates)
{
async_size8_task<T, M, V> *ret = new async_size8_task<T, M, V>;
ret->size8 = size8;
ret->A = &A;
ret->aggregates = &aggregates;
ret->aggregates_global = &aggregates_global;
ret->num_aggregates = &num_aggregates;
static task_chain_cuda_streamset *ss = new task_chain_cuda_streamset(1);
task_chain_cuda *cr = new task_chain_cuda(ss);
cr->append(ret, asyncmanager::singleton()->main_thread_queue(2));
return ret;
}
#endif
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_d &A,
typename Matrix_d::IVector &aggregates,
typename Matrix_d::IVector &aggregates_global,
int &num_aggregates)
{
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
bool task = false;
bool push = false;
if (task)
{
task_cuda *t = make_async_size8_task(this, A, aggregates, aggregates_global, num_aggregates);
enqueue_async_get_receipt(asyncmanager::singleton()->global_parallel_queue, t)->wait();
return;
}
cudaStream_t stream_old;
static cudaStream_t stream = 0;
if (push)
{
stream_old = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
cudaStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]);
if (stream == 0) { cudaStreamCreate(&stream); }
thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream;
}
#endif
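// Dispatch on the average number of nonzero blocks per row; denser matrices use the AVG_NNZ = 4
// specialization, very sparse ones AVG_NNZ = 2.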
const IndexType nnz_per_row = A.get_num_nz() / A.get_num_rows();
if (0)
{
}
else if (nnz_per_row > 2)
{
setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
}
else if (nnz_per_row > 1)
{
setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
}
else
{
setAggregates_common_sqblock_avg_specialized<2>(A, aggregates, aggregates_global, num_aggregates);
}
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
if (push)
{
//cudaStreamSynchronize(stream);
//thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream_old;
}
#endif
}
// setAggregates for block_dia_csr_matrix_d format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template<int AVG_NNZ>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: setAggregates_common_sqblock_avg_specialized(const Matrix_d &A,
typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
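// Builds aggregates of up to 8 vertices in three matching passes: vertices are paired into size-2
// aggregates, pairs are matched into size-4 aggregates, and those are matched once more into size-8
// aggregates; vertices that remain unmatched are finally merged into existing aggregates.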
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
cudaStream_t stream = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
#else
cudaStream_t stream = thrust::global_thread_handle::get_stream();
#endif
const IndexType num_block_rows = A.get_num_rows();
const IndexType num_nonzero_blocks = A.get_num_nz();
if (!A.is_matrix_singleGPU())
{
aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
}
else
{
aggregates.resize(num_block_rows);
}
// Initially, put each vertex in its own aggregate
thrust::sequence(aggregates.begin(), aggregates.begin() + num_block_rows);
cudaCheckError();
IndexType *aggregates_ptr = aggregates.raw();
// Create row_indices array
IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
typename Matrix_d::IVector row_indices(total_nz);
cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_row_indices_ptr = row_indices.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
//const ValueType *A_dia_val_ptr = thrust::raw_pointer_cast(&A.values[A.get_block_size()*A.diagOffset()]);
const ValueType *A_nonzero_values_ptr = A.values.raw();
typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
typename Matrix_d::IVector partner_index(3 * num_block_rows, -1);
typename Matrix_d::IVector strongest_neighbour_tmp(num_block_rows);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
IndexType *partner_index_ptr = partner_index.raw();
const int threads_per_block = 256;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1);
int numUnassigned = num_block_rows;
int numUnassigned_previous = numUnassigned;
Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks + 8); //8-padded
float *edge_weights_ptr = edge_weights.raw();
float *rand_edge_weights_ptr = NULL;
const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
// Compute the edge weights
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
int avoid_thrust_count = 1;//0;
int newFindStrongest = 1;//0;
int newWeights = 1;//0;
#else
int avoid_thrust_count = 0;//0;
int newFindStrongest = 0;//0;
int newWeights = 0;//0;
#endif
int usenumassignedassumption = false;
if (newWeights == 0)
{
cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, cudaFuncCachePreferL1);
computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block, 0, stream>>>(
A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula);
cudaCheckError();
}
else
{
cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType>, cudaFuncCachePreferL1);
computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType> <<< num_blocks_V2, threads_per_block, 0, stream>>>(
A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component);
cudaCheckError();
}
// -------------------------------------------------
// First create aggregates of size 2
// -------------------------------------------------
int icount = 0;
const int num_blocks_1024 = min( 13 * 2, (num_block_rows - 1) / 1024 + 1);
device_vector_alloc<int> sets_per_block_t(num_blocks_1024);
int *sets_per_block = thrust::raw_pointer_cast(sets_per_block_t.data());
cudaCheckError();
do
{
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(
A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
}
else
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(
A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
}
}
else
{
findStrongestNeighbourBlockDiaCsr_NoMerge <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, partner_index_ptr, strongest_neighbour_ptr, this->deterministic);
}
cudaCheckError();
// Look for perfect matches
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
matchEdges <<< num_blocks, threads_per_block, 0, stream>>>(num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr);
cudaCheckError();
numUnassigned = (int)thrust::count(partner_index.begin(), partner_index.begin() + num_block_rows, -1);
cudaCheckError();
}
else
{
my_MatchEdges <<< num_blocks_1024, 1024, 0, stream>>>(num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr, sets_per_block);
cudaCheckError();
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(0, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned));
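    // The size-2 matching loop above stops when every vertex is matched (numUnassigned == 0),
    // the iteration cap is hit, the unassigned fraction drops below numUnassigned_tol, or an
    // iteration makes no further progress (numUnassigned stops decreasing).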
assignUnassignedVertices <<< num_blocks, threads_per_block, 0, stream>>>(partner_index_ptr, num_block_rows);
cudaCheckError();
// -------------------------------------------------
// Merge aggregates to create aggregates of size 4
// -------------------------------------------------
Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > weight_strongest_neighbour(num_block_rows, -1);
float *weight_strongest_neighbour_ptr = weight_strongest_neighbour.raw();
    // At this point, partner_index contains either your own index or your neighbour's index, depending on whether you're matched or not
    // aggregates contains the largest vertex index of the vertices in each aggregate
typename Matrix_d::IVector aggregated(num_block_rows, -1);
IndexType *aggregated_ptr = aggregated.raw();
// now used as flag to check if aggregated or not
icount = 0;
numUnassigned = num_block_rows;
numUnassigned_previous = numUnassigned;
do
{
        // Each vertex stores the aggregate number of its strongest neighbour in strongest_neighbour, and the connection weight in weight_strongest_neighbour
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
else
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
}
else
{
findStrongestNeighbourBlockDiaCsr_StoreWeight <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic);
}
cudaCheckError();
        // Vertices in the same aggregate agree on which aggregate to propose to, and both store the aggregate number they want to match with
agreeOnProposal <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr);
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
matchAggregatesSize4 <IndexType> <<< num_blocks, threads_per_block, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows);
numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
my_matchAggregatesSize4 <<< num_blocks_1024, 1024, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows, sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(1, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous) );
assignUnassignedVertices_2 <<< num_blocks, threads_per_block, 0, stream>>>(partner_index_ptr, num_block_rows);
cudaCheckError();
// -------------------------------------------------
// Merge aggregates to create aggregates of size 8
// -------------------------------------------------
thrust::fill(aggregated.begin(), aggregated.end(), -1);
cudaCheckError();
thrust::fill(weight_strongest_neighbour.begin(), weight_strongest_neighbour.end(), -1.);
cudaCheckError();
icount = 0;
numUnassigned = num_block_rows;
numUnassigned_previous = numUnassigned;
do
{
        // Each vertex stores the aggregate number of its strongest neighbour in strongest_neighbour, and the connection weight in weight_strongest_neighbour
if (newFindStrongest)
{
if (numUnassigned == num_block_rows && usenumassignedassumption)
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
else
{
cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int>, cudaFuncCachePreferL1);
my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
}
}
else
{
findStrongestNeighbourBlockDiaCsr_StoreWeight_2 <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr);
}
cudaCheckError();
        // Vertices in the same aggregate agree on which aggregate to propose to, and both store the aggregate number they want to match with
if (!this->deterministic)
{
agreeOnProposal_2 <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
}
else
{
//strongest_neighbour_tmp = strongest_neighbour; // copied that directly in the kernel
agreeOnProposal_2_deterministic <<< num_blocks, threads_per_block, 0, stream>>>(
strongest_neighbour_tmp.raw(),
A_row_offsets_ptr,
A_column_indices_ptr, num_block_rows,
aggregated_ptr, strongest_neighbour_ptr,
weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
strongest_neighbour_tmp.swap(strongest_neighbour);
strongest_neighbour_ptr = strongest_neighbour.raw(); //re-saving the correct pointer..
}
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
matchAggregates <IndexType> <<< num_blocks, threads_per_block, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows);
numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
my_matchAggregates <<< num_blocks_1024, 1024, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows, sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(2, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
icount++;
}
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned) );
// Merge remaining vertices with current aggregates
int local_iter = 0;
if (!this->deterministic)
{
while (numUnassigned != 0)
{
mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, (IndexType *) NULL, local_iter > 1);
cudaCheckError();
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
cudaCheckError();
local_iter++;
}
}
else
{
typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
while (numUnassigned != 0)
{
// allow singletons only from the 2nd local iteration
mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, aggregates_candidate.raw(), local_iter > 1);
cudaCheckError();
numUnassigned_previous = numUnassigned;
if (avoid_thrust_count == 0)
{
joinExistingAggregates <<< num_blocks, threads_per_block, 0, stream>>>(num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw());
numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
}
else
{
my_joinExistingAggregates <<< num_blocks_1024, 1024, 0, stream>>>(num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw(), sets_per_block);
numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(3, local_iter, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
}
cudaCheckError();
local_iter++;
}
aggregates_candidate.resize(0);
}
this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
//analyze_coloring(aggregates, A.getMatrixColoring().getRowColors());
}
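// Editor's note (illustrative only, not part of AMGX): cusp::detail::offsets_to_indices, used
// above to build row_indices, expands CSR row offsets into one row index per nonzero block.
// A minimal host-side equivalent, with hypothetical names, would look like:
//
//   // offsets has num_rows + 1 entries; indices has offsets[num_rows] entries
//   void offsets_to_indices_host(const std::vector<int> &offsets, std::vector<int> &indices)
//   {
//       for (size_t row = 0; row + 1 < offsets.size(); ++row)
//           for (int k = offsets[row]; k < offsets[row + 1]; ++k)
//               indices[k] = (int)row; // every nonzero in this row gets its row id
//   }
//
// The device version performs the same expansion in parallel (e.g. one binary search per nonzero).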
template <class T_Config>
void Size8SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
if (A.get_block_dimx() == A.get_block_dimy())
{
setAggregates_common_sqblock( A, aggregates, aggregates_global, num_aggregates );
}
else
{
FatalError("Unsupported block size for Size8Selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
}
// -------------------------
// Explicit instantiations
// -------------------------
#define AMGX_CASE_LINE(CASE) template class Size8SelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Size8Selector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
|
c8350fa6b16004f8344797723c6439b7ff6f42ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <vector>
#include <assert.h>
#include <signal.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true)
{
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void kernel(int number_of_threads,int * managed)
{
int index = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
printf("[D] I am %d\n",index );
*managed = 1;
}
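// This test exercises unified (managed) memory: the kernel prints its global thread index and
// writes 1 into *managed. With -e 1 the host writes *managed before hipDeviceSynchronize(), i.e.
// while the kernel may still be running, which is exactly the concurrent-CPU-access case the test
// is meant to provoke; otherwise it synchronizes first and checks that the kernel's write landed.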
int main(int argc, char **argv)
{
int opt, BLOCKS = 1, THREADS = 1, error = 0;
while ((opt = getopt(argc, argv, "b:t:e:")) != -1) {
switch (opt)
{
case 'b':
BLOCKS = atoi(optarg);
break;
case 't':
THREADS = atoi(optarg);
break;
case 'e':
error = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -b [blocks] -t [threads]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
int * managed;
gpuErrchk(hipMallocManaged((void **) &managed,sizeof(int)));
*managed = 0;
    hipLaunchKernelGGL(kernel, dim3(BLOCKS), dim3(THREADS), 0, 0, BLOCKS * THREADS, managed);
if(error){
*managed = 2;
gpuErrchk(hipDeviceSynchronize());
}else{
printf("[H] before hipDeviceSynchronize\n");
gpuErrchk(hipDeviceSynchronize());
assert(*managed != 0);
printf("[H] After hipDeviceSynchronize managed:%d\n",*managed);
*managed = 2;
printf("[H] After cpu access managed:%d\n",*managed);
}
return 0;
}
|
c8350fa6b16004f8344797723c6439b7ff6f42ad.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <vector>
#include <assert.h>
#include <signal.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true)
{
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void kernel(int number_of_threads,int * managed)
{
int index = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
printf("[D] I am %d\n",index );
*managed = 1;
}
int main(int argc, char **argv)
{
int opt, BLOCKS = 1, THREADS = 1, error = 0;
while ((opt = getopt(argc, argv, "b:t:e:")) != -1) {
switch (opt)
{
case 'b':
BLOCKS = atoi(optarg);
break;
case 't':
THREADS = atoi(optarg);
break;
case 'e':
error = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -b [blocks] -t [threads]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
int * managed;
gpuErrchk(cudaMallocManaged((void **) &managed,sizeof(int)));
*managed = 0;
kernel <<< BLOCKS, THREADS >>> (BLOCKS * THREADS, managed);
if(error){
*managed = 2;
gpuErrchk(cudaDeviceSynchronize());
}else{
printf("[H] before cudaDeviceSynchronize\n");
gpuErrchk(cudaDeviceSynchronize());
assert(*managed != 0);
printf("[H] After cudaDeviceSynchronize managed:%d\n",*managed);
*managed = 2;
printf("[H] After cpu access managed:%d\n",*managed);
}
return 0;
}
|
729de094d232cafaf2f975d3b4fafb0cf3c5c975.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
 * This example demonstrates a matrix-matrix multiplication on the CPU and on the GPU.
*/
void initialData(float *ip, const float ival, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 100.0f;
}
return;
}
void matmulOnHost(float *A, float *B, float *C, const int N)
{
int id, ida, idb;
float cc;
for (int iy = 0; iy < N; iy++)
{
for (int ix = 0; ix < N; ix++)
{
cc = 0;
for (int k = 0; k < N; k++){
ida = iy*N + k;
idb = k *N + ix;
cc += A[ida]*B[idb];
}
id = iy*N+ix;
C[id] = cc;
}
}
return;
}
__global__ void matmulOnGPU(float *A, float *B, float *C, const int N)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx;
unsigned int ida, idb;
float cc = 0.0f;
if (ix < N && iy < N)
{
for (int k = 0; k < N; k++)
{
ida = iy*N + k;
idb = k *N + ix;
cc += A[ida]*B[idb];
}
        idx = iy * N + ix;
        C[idx] = cc; // keep the store inside the bounds check to avoid out-of-bounds writes
    }
}
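// Naive kernel: one thread per output element C[iy][ix]; each thread walks a full row of A and a
// full column of B, so every thread issues O(N) global loads with no shared-memory tiling.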
void printMatrix(float *C, const int nx, const int ny)
{
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
printf("%f ", ic[ix]);
}
ic += nx;
printf("\n");
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
// set up data size of matrix
int N = 1 << 5;
int nxy = N * N;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", N, N);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A,2.0f,nxy);
initialData(h_B,0.5f, nxy);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
double iStart = seconds();
matmulOnHost(h_A, h_B, hostRef, N);
double iElaps = seconds() - iStart;
printf("matmul elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((void **)&d_A, nBytes));
CHECK(hipMalloc((void **)&d_B, nBytes));
CHECK(hipMalloc((void **)&d_C, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = N;
int dimy = N;
dim3 block(dimx, dimy);
//dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
dim3 grid(1, 1);
/*
dim3 block(N, N);
dim3 grid(1, 1);
if (N*N > 4096){
block.x = 4096;
block.y = 4096;
grid.x = ceil(double(N)/double(block.x));
grid.y = ceil(double(N)/double(block.y));
}
*/
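    // With block = (N, N) and N = 1 << 5 = 32, a single 32x32 block (1024 threads) covers the
    // whole matrix, so grid(1, 1) is sufficient here. For larger N one would use the tiled launch
    // from the commented line above: grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y).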
iStart = seconds();
    hipLaunchKernelGGL(matmulOnGPU, dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, N);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("matmulOnGPU <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
}
|
729de094d232cafaf2f975d3b4fafb0cf3c5c975.cu
|
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
 * This example demonstrates a matrix-matrix multiplication on the CPU and on the GPU.
*/
void initialData(float *ip, const float ival, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 100.0f;
}
return;
}
void matmulOnHost(float *A, float *B, float *C, const int N)
{
int id, ida, idb;
float cc;
for (int iy = 0; iy < N; iy++)
{
for (int ix = 0; ix < N; ix++)
{
cc = 0;
for (int k = 0; k < N; k++){
ida = iy*N + k;
idb = k *N + ix;
cc += A[ida]*B[idb];
}
id = iy*N+ix;
C[id] = cc;
}
}
return;
}
__global__ void matmulOnGPU(float *A, float *B, float *C, const int N)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx;
unsigned int ida, idb;
float cc = 0.0f;
if (ix < N && iy < N)
{
for (int k = 0; k < N; k++)
{
ida = iy*N + k;
idb = k *N + ix;
cc += A[ida]*B[idb];
}
        idx = iy * N + ix;
        C[idx] = cc; // keep the store inside the bounds check to avoid out-of-bounds writes
    }
}
void printMatrix(float *C, const int nx, const int ny)
{
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
printf("%f ", ic[ix]);
}
ic += nx;
printf("\n");
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
// set up data size of matrix
int N = 1 << 5;
int nxy = N * N;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", N, N);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A,2.0f,nxy);
initialData(h_B,0.5f, nxy);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
double iStart = seconds();
matmulOnHost(h_A, h_B, hostRef, N);
double iElaps = seconds() - iStart;
printf("matmul elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((void **)&d_A, nBytes));
CHECK(cudaMalloc((void **)&d_B, nBytes));
CHECK(cudaMalloc((void **)&d_C, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = N;
int dimy = N;
dim3 block(dimx, dimy);
//dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
dim3 grid(1, 1);
/*
dim3 block(N, N);
dim3 grid(1, 1);
if (N*N > 4096){
block.x = 4096;
block.y = 4096;
grid.x = ceil(double(N)/double(block.x));
grid.y = ceil(double(N)/double(block.y));
}
*/
iStart = seconds();
matmulOnGPU<<<grid, block>>>(d_A, d_B, d_C, N);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("matmulOnGPU <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
2921da2df79e96177f8183ca8e1e2875e73418b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iomanip>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <vector>
#include "EasyBMP.h"
#define CUDA_CALL(x) do { hipError_t err = x; if (( err ) != hipSuccess ) { \
printf ("Error \"%s\" at %s :%d \n" , hipGetErrorString(err), \
__FILE__ , __LINE__ ) ; exit(-1); \
}} while (0)
using namespace std;
#define ARR(T, i, j) (T[(i) + (j) * width])
__device__ inline void interpolate(
const RGBApixel* pixels, RGBApixel& output, int width, float x, float y)
{
int px = (int)x; // floor of x
int py = (int)y; // floor of y
const int stride = width;
const RGBApixel* p0 = &pixels[0] + px + py * stride; // pointer to first pixel
// load the four neighboring pixels
const RGBApixel& p1 = p0[0 + 0 * stride];
const RGBApixel& p2 = p0[1 + 0 * stride];
const RGBApixel& p3 = p0[0 + 1 * stride];
const RGBApixel& p4 = p0[1 + 1 * stride];
// Calculate the weights for each pixel
float fx = x - px;
float fy = y - py;
float fx1 = 1.0f - fx;
float fy1 = 1.0f - fy;
int w1 = fx1 * fy1 * 256.0f + 0.5f;
int w2 = fx * fy1 * 256.0f + 0.5f;
int w3 = fx1 * fy * 256.0f + 0.5f;
int w4 = fx * fy * 256.0f + 0.5f;
// Calculate the weighted sum of pixels (for each color channel)
int outr = p1.Red * w1 + p2.Red * w2 + p3.Red * w3 + p4.Red * w4;
int outg = p1.Green * w1 + p2.Green * w2 + p3.Green * w3 + p4.Green * w4;
int outb = p1.Blue * w1 + p2.Blue * w2 + p3.Blue * w3 + p4.Blue * w4;
int outa = p1.Alpha * w1 + p2.Alpha * w2 + p3.Alpha * w3 + p4.Alpha * w4;
output.Red = (outr + 128) >> 8;
output.Green = (outg + 128) >> 8;
output.Blue = (outb + 128) >> 8;
output.Alpha = (outa + 128) >> 8;
}
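// The interpolation above uses 8-bit fixed point: the four bilinear weights are scaled by 256
// (with +0.5f for rounding) so they sum to ~256, and the final +128 followed by >> 8 rounds the
// weighted sum back down to an 8-bit channel. For example, with fx = fy = 0.5 each weight becomes
// 64 and the output is simply the rounded average of the four neighbouring pixels.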
__global__ void bilinear (const int width, const int height,
RGBApixel* input, RGBApixel* output)
{
int j = blockDim.y * blockIdx.y + threadIdx.y;
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= 2 * height) return;
if (i >= 2 * width) return;
float x = width * (i - 0.5f) / (float)(2 * width);
float y = height * (j - 0.5f) / (float)(2 * height);
interpolate(input, output[i + j * 2 * width], width, x, y);
}
// Get the timer value.
static void get_time(volatile struct timespec* val)
{
clock_gettime(CLOCK_REALTIME, (struct timespec*)val);
}
// Get the timer measured values difference.
static double get_time_diff(struct timespec* val1, struct timespec* val2)
{
int64_t seconds = val2->tv_sec - val1->tv_sec;
int64_t nanoseconds = val2->tv_nsec - val1->tv_nsec;
if (val2->tv_nsec < val1->tv_nsec)
{
seconds--;
nanoseconds = (1000000000 - val1->tv_nsec) + val2->tv_nsec;
}
return (double)0.000000001 * nanoseconds + seconds;
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
cout << "Usage: " << argv[0] << " <filename>" << endl;
return 0;
}
char* filename = argv[1];
BMP AnImage;
AnImage.ReadFromFile(filename);
int width = AnImage.TellWidth();
int height = AnImage.TellHeight();
vector<RGBApixel> input(width * (height + 1) + 1);
vector<RGBApixel> output(4 * width * height);
for (int i = 0; i < width; i++)
for (int j = 0; j < height; j++)
input[i + j * width] = AnImage.GetPixel(i, j);
memset(&input[height * width], 0, (width + 1) * sizeof(RGBApixel));
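    // The extra padded row zeroed here (input was sized width * (height + 1) + 1) keeps the
    // p0[1 + 1 * stride] reads in interpolate() in bounds for pixels on the right/bottom border.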
RGBApixel *dinput, *doutput;
CUDA_CALL(hipMalloc(&dinput, sizeof(RGBApixel) * input.size()));
CUDA_CALL(hipMalloc(&doutput, sizeof(RGBApixel) * output.size()));
CUDA_CALL(hipMemcpy(dinput, &input[0], sizeof(RGBApixel) * input.size(), hipMemcpyHostToDevice));
struct timespec start;
get_time(&start);
dim3 szblock(128, 1, 1);
dim3 nblocks(2 * width / szblock.x, 2 * height, 1);
if (2 * width % szblock.x) nblocks.x++;
    hipLaunchKernelGGL(bilinear, dim3(nblocks), dim3(szblock), 0, 0, width, height, dinput, doutput);
CUDA_CALL(hipGetLastError());
CUDA_CALL(hipDeviceSynchronize());
struct timespec finish;
get_time(&finish);
printf("GPU kernel time = %f sec\n", get_time_diff(&start, &finish));
CUDA_CALL(hipMemcpy(&output[0], doutput, sizeof(RGBApixel) * output.size(), hipMemcpyDeviceToHost));
AnImage.SetSize(2 * width, 2 * height);
for (int i = 0; i < 2 * width; i++)
for (int j = 0; j < 2 * height; j++)
AnImage.SetPixel(i, j, output[i + j * 2 * width]);
AnImage.WriteToFile("output_gpu.bmp");
return 0;
}
|
2921da2df79e96177f8183ca8e1e2875e73418b8.cu
|
#include <iomanip>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <vector>
#include "EasyBMP.h"
#define CUDA_CALL(x) do { cudaError_t err = x; if (( err ) != cudaSuccess ) { \
printf ("Error \"%s\" at %s :%d \n" , cudaGetErrorString(err), \
__FILE__ , __LINE__ ) ; exit(-1); \
}} while (0)
using namespace std;
#define ARR(T, i, j) (T[(i) + (j) * width])
__device__ inline void interpolate(
const RGBApixel* pixels, RGBApixel& output, int width, float x, float y)
{
int px = (int)x; // floor of x
int py = (int)y; // floor of y
const int stride = width;
const RGBApixel* p0 = &pixels[0] + px + py * stride; // pointer to first pixel
// load the four neighboring pixels
const RGBApixel& p1 = p0[0 + 0 * stride];
const RGBApixel& p2 = p0[1 + 0 * stride];
const RGBApixel& p3 = p0[0 + 1 * stride];
const RGBApixel& p4 = p0[1 + 1 * stride];
// Calculate the weights for each pixel
float fx = x - px;
float fy = y - py;
float fx1 = 1.0f - fx;
float fy1 = 1.0f - fy;
int w1 = fx1 * fy1 * 256.0f + 0.5f;
int w2 = fx * fy1 * 256.0f + 0.5f;
int w3 = fx1 * fy * 256.0f + 0.5f;
int w4 = fx * fy * 256.0f + 0.5f;
// Calculate the weighted sum of pixels (for each color channel)
int outr = p1.Red * w1 + p2.Red * w2 + p3.Red * w3 + p4.Red * w4;
int outg = p1.Green * w1 + p2.Green * w2 + p3.Green * w3 + p4.Green * w4;
int outb = p1.Blue * w1 + p2.Blue * w2 + p3.Blue * w3 + p4.Blue * w4;
int outa = p1.Alpha * w1 + p2.Alpha * w2 + p3.Alpha * w3 + p4.Alpha * w4;
output.Red = (outr + 128) >> 8;
output.Green = (outg + 128) >> 8;
output.Blue = (outb + 128) >> 8;
output.Alpha = (outa + 128) >> 8;
}
__global__ void bilinear (const int width, const int height,
RGBApixel* input, RGBApixel* output)
{
int j = blockDim.y * blockIdx.y + threadIdx.y;
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= 2 * height) return;
if (i >= 2 * width) return;
float x = width * (i - 0.5f) / (float)(2 * width);
float y = height * (j - 0.5f) / (float)(2 * height);
interpolate(input, output[i + j * 2 * width], width, x, y);
}
// Get the timer value.
static void get_time(volatile struct timespec* val)
{
clock_gettime(CLOCK_REALTIME, (struct timespec*)val);
}
// Get the timer measured values difference.
static double get_time_diff(struct timespec* val1, struct timespec* val2)
{
int64_t seconds = val2->tv_sec - val1->tv_sec;
int64_t nanoseconds = val2->tv_nsec - val1->tv_nsec;
if (val2->tv_nsec < val1->tv_nsec)
{
seconds--;
nanoseconds = (1000000000 - val1->tv_nsec) + val2->tv_nsec;
}
return (double)0.000000001 * nanoseconds + seconds;
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
cout << "Usage: " << argv[0] << " <filename>" << endl;
return 0;
}
char* filename = argv[1];
BMP AnImage;
AnImage.ReadFromFile(filename);
int width = AnImage.TellWidth();
int height = AnImage.TellHeight();
vector<RGBApixel> input(width * (height + 1) + 1);
vector<RGBApixel> output(4 * width * height);
for (int i = 0; i < width; i++)
for (int j = 0; j < height; j++)
input[i + j * width] = AnImage.GetPixel(i, j);
memset(&input[height * width], 0, (width + 1) * sizeof(RGBApixel));
RGBApixel *dinput, *doutput;
CUDA_CALL(cudaMalloc(&dinput, sizeof(RGBApixel) * input.size()));
CUDA_CALL(cudaMalloc(&doutput, sizeof(RGBApixel) * output.size()));
CUDA_CALL(cudaMemcpy(dinput, &input[0], sizeof(RGBApixel) * input.size(), cudaMemcpyHostToDevice));
struct timespec start;
get_time(&start);
dim3 szblock(128, 1, 1);
dim3 nblocks(2 * width / szblock.x, 2 * height, 1);
if (2 * width % szblock.x) nblocks.x++;
bilinear<<<nblocks, szblock>>>(width, height, dinput, doutput);
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaDeviceSynchronize());
struct timespec finish;
get_time(&finish);
printf("GPU kernel time = %f sec\n", get_time_diff(&start, &finish));
CUDA_CALL(cudaMemcpy(&output[0], doutput, sizeof(RGBApixel) * output.size(), cudaMemcpyDeviceToHost));
AnImage.SetSize(2 * width, 2 * height);
for (int i = 0; i < 2 * width; i++)
for (int j = 0; j < 2 * height; j++)
AnImage.SetPixel(i, j, output[i + j * 2 * width]);
AnImage.WriteToFile("output_gpu.bmp");
return 0;
}
|
651bc76b0481c3ed31dd5b8a5b38d5ef9cac645e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
// includes, kernels
#include "vector_dot_product_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// forward declarations of helper functions
void run_test(unsigned int);
float compute_on_device(float *, float *,int);
void check_for_error(char *);
extern "C" float compute_gold( float *, float *, unsigned int);
// define main function
int main( int argc, char** argv)
{
if(argc != 2){
printf("Usage: vector_dot_product <num elements> \n");
exit(0);
}
unsigned int num_elements = atoi(argv[1]);
run_test(num_elements);
return 0;
}
void run_test(unsigned int num_elements)
{
// Obtain the vector length
unsigned int size = sizeof(float) * num_elements;
// Allocate memory on the CPU for the input vectors A and B
float *A = (float *)malloc(size);
float *B = (float *)malloc(size);
    // Randomly generate input data. Initialize the input data to be floating point values between [-0.5, 0.5]
printf("Generating random vectors with values between [-.5, .5]. \n");
// seed the pseudo random number generator
srand(time(0));
    // populate vectors A and B
for(unsigned int i = 0; i < num_elements; i++){
A[i] = (float)rand()/(float)RAND_MAX - 0.5;
B[i] = (float)rand()/(float)RAND_MAX - 0.5;
}
printf("Generating dot product on the CPU. \n");
// Compute the reference solution on the CPU
float reference = compute_gold(A, B, num_elements);
float gpu_result = compute_on_device(A, B, num_elements);
printf("Result on CPU: %f, result on GPU (using %d threads per block): %f. \n", reference, BLOCK_SIZE, gpu_result);
// cleanup memory
free(A);
free(B);
return;
}
/* Edit this function to compute the dot product on the device using atomic intrinsics. */
float compute_on_device(float *A_on_host, float *B_on_host, int num_elements)
{
float *A_on_device = NULL;
float *B_on_device = NULL;
float *C_on_device = NULL;
/* alloc space on device for initial vectors, copy data */
hipMalloc( (void **)&A_on_device, num_elements * sizeof(float) );
hipMemcpy( A_on_device, A_on_host, num_elements * sizeof(float), hipMemcpyHostToDevice);
hipMalloc( (void **)&B_on_device, num_elements * sizeof(float) );
hipMemcpy( B_on_device, B_on_host, num_elements * sizeof(float), hipMemcpyHostToDevice);
/* alloc space for result, copy data */
hipMalloc( (void **)&C_on_device, GRID_SIZE * sizeof(float) ); // is vector instead of single val for testing purposes
    hipMemset( C_on_device, 0, GRID_SIZE * sizeof(float) );
/* mutex for sync */
int *mutex = NULL;
hipMalloc((void **)&mutex, sizeof(int));
hipMemset(mutex, 0, sizeof(int));
/* Define grid parameters for GPU */
dim3 thread_block(BLOCK_SIZE, 1, 1);
dim3 grid(GRID_SIZE,1);
/* Launch kernel, sync ( for timing purposes ) */
    hipLaunchKernelGGL(vector_dot_product_kernel, dim3(grid), dim3(thread_block), 0, 0, A_on_device, B_on_device, C_on_device, num_elements, mutex);
hipDeviceSynchronize();
check_for_error("KERNEL FAILURE");
/* copy result back to host */
float *C_host = (float *) malloc(GRID_SIZE*sizeof(float));
float result = 0.0f;
hipMemcpy( &result, C_on_device, sizeof(float), hipMemcpyDeviceToHost );
/* Free mem on GPU */
hipFree(A_on_device);
hipFree(B_on_device);
hipFree(C_on_device);
return result;
}
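/* Editor's note -- illustrative sketch only. The actual kernel lives in
 * vector_dot_product_kernel.cu (not shown here), and the mutex argument suggests it may use a
 * lock (atomicCAS spinlock) rather than a plain atomicAdd. One common atomic-based layout,
 * assuming BLOCK_SIZE is a power of two, is a per-block shared-memory reduction followed by a
 * single atomic per block:
 *
 *   __global__ void dot_product_sketch(const float *A, const float *B, float *C, int n)
 *   {
 *       __shared__ float partial[BLOCK_SIZE];
 *       int tid = threadIdx.x;
 *       float sum = 0.0f;
 *       for (int i = blockIdx.x * blockDim.x + tid; i < n; i += blockDim.x * gridDim.x)
 *           sum += A[i] * B[i];                        // grid-stride partial sums
 *       partial[tid] = sum;
 *       __syncthreads();
 *       for (int s = blockDim.x / 2; s > 0; s >>= 1) { // tree reduction in shared memory
 *           if (tid < s) partial[tid] += partial[tid + s];
 *           __syncthreads();
 *       }
 *       if (tid == 0) atomicAdd(&C[0], partial[0]);    // one atomic add per block
 *   }
 */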
// This function checks for errors returned by the CUDA run time
void check_for_error(char *msg){
hipError_t err = hipGetLastError();
if(hipSuccess != err){
printf("CUDA ERROR: %s (%s). \n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
651bc76b0481c3ed31dd5b8a5b38d5ef9cac645e.cu
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
// includes, kernels
#include "vector_dot_product_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// forward declarations of helper functions
void run_test(unsigned int);
float compute_on_device(float *, float *,int);
void check_for_error(char *);
extern "C" float compute_gold( float *, float *, unsigned int);
// define main function
int main( int argc, char** argv)
{
if(argc != 2){
printf("Usage: vector_dot_product <num elements> \n");
exit(0);
}
unsigned int num_elements = atoi(argv[1]);
run_test(num_elements);
return 0;
}
void run_test(unsigned int num_elements)
{
// Obtain the vector length
unsigned int size = sizeof(float) * num_elements;
// Allocate memory on the CPU for the input vectors A and B
float *A = (float *)malloc(size);
float *B = (float *)malloc(size);
    // Randomly generate input data. Initialize the input data to be floating point values between [-0.5, 0.5]
printf("Generating random vectors with values between [-.5, .5]. \n");
// seed the pseudo random number generator
srand(time(0));
    // populate vectors A and B
for(unsigned int i = 0; i < num_elements; i++){
A[i] = (float)rand()/(float)RAND_MAX - 0.5;
B[i] = (float)rand()/(float)RAND_MAX - 0.5;
}
printf("Generating dot product on the CPU. \n");
// Compute the reference solution on the CPU
float reference = compute_gold(A, B, num_elements);
float gpu_result = compute_on_device(A, B, num_elements);
printf("Result on CPU: %f, result on GPU (using %d threads per block): %f. \n", reference, BLOCK_SIZE, gpu_result);
// cleanup memory
free(A);
free(B);
return;
}
/* Edit this function to compute the dot product on the device using atomic intrinsics. */
float compute_on_device(float *A_on_host, float *B_on_host, int num_elements)
{
float *A_on_device = NULL;
float *B_on_device = NULL;
float *C_on_device = NULL;
/* alloc space on device for initial vectors, copy data */
cudaMalloc( (void **)&A_on_device, num_elements * sizeof(float) );
cudaMemcpy( A_on_device, A_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc( (void **)&B_on_device, num_elements * sizeof(float) );
cudaMemcpy( B_on_device, B_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);
/* alloc space for result, copy data */
cudaMalloc( (void **)&C_on_device, GRID_SIZE * sizeof(float) ); // is vector instead of single val for testing purposes
    cudaMemset( C_on_device, 0, GRID_SIZE * sizeof(float) );
/* mutex for sync */
int *mutex = NULL;
cudaMalloc((void **)&mutex, sizeof(int));
cudaMemset(mutex, 0, sizeof(int));
/* Define grid parameters for GPU */
dim3 thread_block(BLOCK_SIZE, 1, 1);
dim3 grid(GRID_SIZE,1);
/* Launch kernel, sync ( for timing purposes ) */
vector_dot_product_kernel <<< grid, thread_block >>> (A_on_device, B_on_device, C_on_device, num_elements,mutex);
cudaThreadSynchronize();
check_for_error("KERNEL FAILURE");
/* copy result back to host */
float *C_host = (float *) malloc(GRID_SIZE*sizeof(float));
float result = 0.0f;
cudaMemcpy( &result, C_on_device, sizeof(float), cudaMemcpyDeviceToHost );
/* Free mem on GPU */
cudaFree(A_on_device);
cudaFree(B_on_device);
cudaFree(C_on_device);
return result;
}
// This function checks for errors returned by the CUDA run time
void check_for_error(char *msg){
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err){
printf("CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
9ac6b81877348aa47391671c307b5eece5adc1a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "Concatenate.hpp"
// template class ConcatenateChannelWise<int>;
template class ConcatenateChannelWise<float>;
// template class ConcatenateChannelWise<double>;
__global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx];
}
}
}
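// ForwardPropagateOnGPU (below) copies each input tensor's channels into its slice of the
// concatenated result: preSize is the element offset of that input's first channel inside the
// result image, and the grid-stride loop in the kernel above lets the fixed <<<64, 128>>> launch
// cover inputs of any size.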
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) {
int noBlock = 3, threadsPerBlock = 128;
Tensor<DTYPE> *result = this->GetResult();
Tensor<DTYPE> *input = NULL;
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
Shape *resultTenShape = result->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *result_gpu = result->GetGPUData();
DTYPE *input_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input = this->GetInput()[opnum]->GetResult();
input_gpu = input->GetGPUData();
inputChannelSize = input->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
// std::cout << "check" << '\n';
GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock);
// printf("%d, %d\n", noBlock, threadsPerBlock);
ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize);
}
return TRUE;
}
__global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *this_delta = this->GetDelta();
Tensor<DTYPE> *input_delta = NULL;
int timesize = this_delta->GetTimeSize();
int batchsize = this_delta->GetBatchSize();
int channelsize = this_delta->GetChannelSize();
int rowsize = this_delta->GetRowSize();
int colsize = this_delta->GetColSize();
Shape *resultTenShape = this_delta->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *delta_gpu = this_delta->GetGPUData();
DTYPE *input_delta_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input_delta = this->GetInput()[opnum]->GetDelta();
input_delta_gpu = input_delta->GetGPUData();
inputChannelSize = input_delta->GetChannelSize();
        preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize);
}
return TRUE;
}
#endif // ifdef __CUDNN__
|
9ac6b81877348aa47391671c307b5eece5adc1a8.cu
|
#ifdef __CUDNN__
#include "Concatenate.hpp"
// template class ConcatenateChannelWise<int>;
template class ConcatenateChannelWise<float>;
// template class ConcatenateChannelWise<double>;
__global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) {
int noBlock = 3, threadsPerBlock = 128;
Tensor<DTYPE> *result = this->GetResult();
Tensor<DTYPE> *input = NULL;
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
Shape *resultTenShape = result->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *result_gpu = result->GetGPUData();
DTYPE *input_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input = this->GetInput()[opnum]->GetResult();
input_gpu = input->GetGPUData();
inputChannelSize = input->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
// std::cout << "check" << '\n';
GetKernelParameters(sizeOfInputImg, &noBlock, &threadsPerBlock);
// printf("%d, %d\n", noBlock, threadsPerBlock);
ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize);
}
return TRUE;
}
__global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *this_delta = this->GetDelta();
Tensor<DTYPE> *input_delta = NULL;
int timesize = this_delta->GetTimeSize();
int batchsize = this_delta->GetBatchSize();
int channelsize = this_delta->GetChannelSize();
int rowsize = this_delta->GetRowSize();
int colsize = this_delta->GetColSize();
Shape *resultTenShape = this_delta->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *delta_gpu = this_delta->GetGPUData();
DTYPE *input_delta_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input_delta = this->GetInput()[opnum]->GetDelta();
input_delta_gpu = input_delta->GetGPUData();
inputChannelSize = input_delta->GetChannelSize();
        preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize);
}
return TRUE;
}
#endif // ifdef __CUDNN__
|
2843c7f47312315776ce4fd6e568677795213bba.hip
|
// !!! This is a file automatically generated by hipify!!!
// #include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define CHECK
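// NOTE: CHECK is defined as an empty object-like macro here, so CHECK(x) expands to just (x)
// and the return codes of the runtime calls below are not actually checked.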
#include<sys/time.h>
double seconds(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF ) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx )
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
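// Each thread owns one matrix column: ix selects the column and the iy loop walks down it, so a
// 1D grid of at least nx threads covers the whole nx-by-ny matrix.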
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("initialize matrix elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(hipMalloc((void **)&d_MatA, nBytes));
CHECK(hipMalloc((void **)&d_MatB, nBytes));
CHECK(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = seconds();
    hipLaunchKernelGGL(sumMatrixOnGPU1D, dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_MatA));
CHECK(hipFree(d_MatB));
CHECK(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
}
|
2843c7f47312315776ce4fd6e568677795213bba.cu
|
// #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define CHECK
#include<sys/time.h>
double seconds(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF ) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx )
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("initialize matrix elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = seconds();
sumMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_MatA));
CHECK(cudaFree(d_MatB));
CHECK(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
d5d5c5f6cdc458ad4f4384055eab58dbd312ff0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
hipDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",hipGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
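// divergence() uses backward differences (with p taken as zero outside the volume) and
// gradient() uses forward differences (zero at the far boundary); up to sign they form the
// discrete adjoint pair required by the TV scheme below.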
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
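// update_u performs the primal relaxation step u = (1-tau)*u + tau*(f + div(p)/lambda), and
// update_p performs the dual step q = p + tau*grad(u) followed by the reprojection p = q/max(1,|q|),
// following the gradient-descent/reprojection TV scheme of the reference linked above.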
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are in the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough, we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
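// The factor of 5 below corresponds to the five per-GPU volumes allocated later: d_src, d_u, d_px, d_py and d_pz.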
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
if (isHostRegisterSupported && splits>1){
hipHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
hipHostMalloc((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
hipHostMalloc((void**)&buffer_u, image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_px,image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_py,image_size[0]*image_size[1]*sizeof(float));
hipHostMalloc((void**)&buffer_pz,image_size[0]*image_size[1]*sizeof(float));
}
// We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
hipSetDevice(dev);
// F
hipMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
hipMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
hipMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
hipMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
hipMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
hipDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// per GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// Let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// Precompute indices and needed bytes
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
// All these are async
hipMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
hipMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 && i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+3]);
hipMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+4]);
hipMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Inner iterations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_u), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_p), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
hipSetDevice(dev+1);
hipMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] +slices_per_split+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+slices_per_split+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev]+slices_per_split+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev]+slices_per_split+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
hipDeviceSynchronize();
if (dev>0){
// U
hipSetDevice(dev-1);
hipMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+2]);
hipMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+3]);
hipMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipFree(d_src[dev]);
hipFree(d_u [dev]);
hipFree(d_pz[dev]);
hipFree(d_py[dev]);
hipFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
hipHostFree(h_px);
hipHostFree(h_py);
hipHostFree(h_pz);
}else if(splits==1){
hipHostFree(buffer_u);
hipHostFree(buffer_px);
hipHostFree(buffer_py);
hipHostFree(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
if (isHostRegisterSupported && splits>1){
hipHostUnregister(src);
hipHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
d5d5c5f6cdc458ad4f4384055eab58dbd312ff0f.cu
|
/*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
cudaDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",cudaGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
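// divergence() uses backward differences (with p taken as zero outside the volume) and
// gradient() uses forward differences (zero at the far boundary); up to sign they form the
// discrete adjoint pair required by the TV scheme below.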
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
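// update_u performs the primal relaxation step u = (1-tau)*u + tau*(f + div(p)/lambda), and
// update_p performs the dual step q = p + tau*grad(u) followed by the reprojection p = q/max(1,|q|),
// following the gradient-descent/reprojection TV scheme of the reference linked above.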
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are in the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough, we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
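// The factor of 5 below corresponds to the five per-GPU volumes allocated later: d_src, d_u, d_px, d_py and d_pz.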
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
if (isHostRegisterSupported && splits>1){
cudaHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
cudaMallocHost((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
cudaMallocHost((void**)&buffer_u, image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_px,image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_py,image_size[0]*image_size[1]*sizeof(float));
cudaMallocHost((void**)&buffer_pz,image_size[0]*image_size[1]*sizeof(float));
}
// We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
cudaSetDevice(dev);
// F
cudaMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
cudaMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
cudaMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
cudaMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
cudaMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
cudaDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// per GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// Let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// Precompute indices and needed bytes
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
// All these are async
cudaMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
cudaMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 && i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+3]);
cudaMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Inner iterations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_u<<<grid, block,0,stream[dev*nStream_device]>>>(d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_p<<<grid, block,0,stream[dev*nStream_device]>>>(d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
cudaSetDevice(dev+1);
cudaMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] +slices_per_split+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+slices_per_split+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev]+slices_per_split+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev]+slices_per_split+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
cudaDeviceSynchronize();
if (dev>0){
// U
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+2]);
cudaMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+3]);
cudaMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaFree(d_src[dev]);
cudaFree(d_u [dev]);
cudaFree(d_pz[dev]);
cudaFree(d_py[dev]);
cudaFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
cudaFreeHost(h_px);
cudaFreeHost(h_py);
cudaFreeHost(h_pz);
}else if(splits==1){
cudaFreeHost(buffer_u);
cudaFreeHost(buffer_px);
cudaFreeHost(buffer_py);
cudaFreeHost(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if (isHostRegisterSupported && splits>1){
cudaHostUnregister(src);
cudaHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
224aa8fc8fe82bd4aced9bc425f74a0600ae8f29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pybind_gpu.h"
__global__ void gpu_add(int* arr1, int* arr2, int* result, int size);
// A __global__ kernel needs an explicit launch configuration; this sketch assumes the
// pointers already refer to device memory and picks an arbitrary 256-thread block size.
void parallel_add(int* arr1, int* arr2, int* result, int size) {
hipLaunchKernelGGL(gpu_add, dim3((size + 255) / 256), dim3(256), 0, 0, arr1, arr2, result, size);
hipDeviceSynchronize();
}
__global__ void gpu_add(int* arr1, int* arr2, int* result, int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
result[idx] = arr1[idx] + arr2[idx];
}
}
|
224aa8fc8fe82bd4aced9bc425f74a0600ae8f29.cu
|
#include "pybind_gpu.h"
__global__ void gpu_add(int* arr1, int* arr2, int* result, int size);
// A __global__ kernel needs an explicit launch configuration; this sketch assumes the
// pointers already refer to device memory and picks an arbitrary 256-thread block size.
void parallel_add(int* arr1, int* arr2, int* result, int size) {
gpu_add<<<(size + 255) / 256, 256>>>(arr1, arr2, result, size);
cudaDeviceSynchronize();
}
__global__ void gpu_add(int* arr1, int* arr2, int* result, int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
result[idx] = arr1[idx] + arr2[idx];
}
}
|
a5cbc4aead9b14fb788179516b3031880bc01553.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.cuh"
#include "bnorm.cuh"
#include "cutens.h"
void cubnorm (cuftens *mean, cuftens *istd,
cuftens *beta, cuftens *gamma,
cuftens *in)
{
const int TS=32;
const int M=in->N, N=in->N, L=in->L;
const int len = cuftens_len(in);
hipLaunchKernelGGL(( ker_bnorm) , dim3(CEIL(len, TS)), dim3(TS), 0, 0,
mean->data, istd->data,
beta->data, gamma->data, len, L > 1 ? L : M*N,
in->data);
}
|
a5cbc4aead9b14fb788179516b3031880bc01553.cu
|
#include "util.cuh"
#include "bnorm.cuh"
#include "cutens.h"
void cubnorm (cuftens *mean, cuftens *istd,
cuftens *beta, cuftens *gamma,
cuftens *in)
{
const int TS=32;
const int M=in->N, N=in->N, L=in->L;
const int len = cuftens_len(in);
ker_bnorm <<<CEIL(len, TS), TS>>>
(mean->data, istd->data,
beta->data, gamma->data, len, L > 1 ? L : M*N,
in->data);
}
|
9a1822d72e53ad963e88485ce6e02847428cae56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <GL/freeglut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// TBD
//
#define PI 3.141592653589793
//
#define cap 1000
#define ref 0.9//0.5
#define temp 4000
#define visc 9
#define GRAV (6.674*0.00000000000000000001)
#define density (2.5 * 1000000000000)
#define sigma (0.96*5.67*0.00000001) //W/m^2 T^4
#define cool (sigma*4*PI*rad*rad*1000000*10)
//
#define rad 300//40 //km
#define M (4 / 3 * PI * rad*rad*rad* density)//kg
//
#define MOONOFFSET_X (INIT_WIDTH/vision*2)
#define MOONOFFSET_Y (INIT_WIDTH/vision*3)
#define MOONOFFSET_Z (INIT_HEIGHT/vision*3)
#define dev 360//12
#define resol 10
#define hollow 100//10
#define X 0
#define Y 1
#define Z 2
#define ANIM_START 0
#define ANIM 20//10
#define scale 0.01
#define colmargin 1.0001//1.0001
#define adjust 0.999
#define R (rad * scale)
#define INIT_WIDTH 800
#define INIT_HEIGHT 800
#define vision 40
#define Grid_x 2//block__syncthread
#define Grid_y 2
#define Grid_z 1
#define Block_x 2//32
#define Block_y 2//16
#define Block_z 2//1
#define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z)
unsigned int dev_points = dev + 1;
unsigned int window_width = INIT_WIDTH;
unsigned int window_height = INIT_HEIGHT;
double vision_size = vision;
float right_motion=0;
float up_motion=0;
double left, right, bottom, top;
float h_point[NUM_POINTS][3]={0};
float v_point[NUM_POINTS][3]={0};
float st_point[NUM_POINTS]={0};
float e_point[NUM_POINTS]={0};
float J_point[NUM_POINTS]={0};
float hv_buff[NUM_POINTS][3]={0};
float hp_buff[NUM_POINTS][3]={0};
float anim_time = ANIM_START;
float anim_dt = ANIM;
double phi = 30.0;
double theta = 30.0;
float light_pos[4]={0};
int mouse_old_x, mouse_old_y;
bool motion_p;
bool motion_w;
double eye[3]={0};
double center[3] = {0.0, 0.0, 0.0};
double up[3]={0};
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
float (*dst_point);
float (*de_point);
float (*dJ_point);
float (*v_buff)[3];
float (*p_buff)[3];
float colsynctime[NUM_POINTS][NUM_POINTS]={0};
int colsyncindex[NUM_POINTS][NUM_POINTS]={0};
float (*dcolsynctime)[NUM_POINTS];
int (*dcolsyncindex)[NUM_POINTS];
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3]);
__global__ void grav_padjust(float(*pos)[3], float(*p_buff)[3]);
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]);
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]);
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3]);
__global__ void grav_p(float (*pos)[3], float(*vec)[3]);
//
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
//
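// grav_coldetect: for every other particle within 2R of this one, record the partner index and
// an estimated contact time (overlap depth over the closing speed along the line of centres),
// and nudge overlapping particles apart through p_buff back towards a 2R*colmargin separation.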
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
float xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
float rvec[3]={0};
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
p_buff[index][X]=xn;
p_buff[index][Y]=yn;
p_buff[index][Z]=zn;
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
dis = (float)sqrt((double)sq);
rvec[X]=(pos[i][X]-xn)/dis;
rvec[Y]=(pos[i][Y]-yn)/dis;
rvec[Z]=(pos[i][Z]-zn)/dis;
//
if (dis > 2 * R && i != index)
{
colindex[index][i]=NUM_POINTS;
}
else if (dis <= 2 * R && i != index)
{
//
colindex[index][i]=i;
coltime[index][i]=(2*R-dis)/((vx-vec[i][X])*rvec[X]+(vy-vec[i][Y])*rvec[Y]+(vz-vec[i][Z])*rvec[Z]);
//
if(dis <= 2 * R * adjust)
{
p_buff[index][X]+=(p_buff[index][X]-pos[i][X])/dis*(2*R*colmargin-dis);
p_buff[index][Y]+=(p_buff[index][Y]-pos[i][Y])/dis*(2*R*colmargin-dis);
p_buff[index][Z]+=(p_buff[index][Z]-pos[i][Z])/dis*(2*R*colmargin-dis);
}
}
else
{
colindex[index][i]=NUM_POINTS;
}
}
}
//
__global__ void grav_padjust(float(*pos)[3], float(*p_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
pos[index][X]=p_buff[index][X];
pos[index][Y]=p_buff[index][Y];
pos[index][Z]=p_buff[index][Z];
}
//Compute post-collision velocities
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float tmptime=0;
int tmpindex=0;
int coldex=0;
float repul=0;
float rvec[3]={0};
float Vl[3]={0};
float Vr[3]={0};
float Vh[3]={0};
float vl_buff[3]={0};
float vr_buff[3]={0};
float vcol_buff[3]={0};
float dotV=0;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vl_buff[X]=vec[index][X];
vl_buff[Y]=vec[index][Y];
vl_buff[Z]=vec[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum>0){
//Index-sort collisions by elapsed time since entering the collision zone
for(int i = 0 ; i < NUM_POINTS; i++){
for(int j = i+1; j < NUM_POINTS; j++){
if(coltime[index][i] > coltime[index][j]){
tmptime=coltime[index][i];
tmpindex=colindex[index][i];
coltime[index][i]=coltime[index][j];
colindex[index][i]=colindex[index][j];
coltime[index][j]=tmptime;
colindex[index][j]=tmpindex;
}
}
}
//Process collisions starting with the longest elapsed time
for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){
coldex=colindex[index][i];
sq = (float)pow((double)(xn-pos[coldex][X]),2)+pow((double)(yn-pos[coldex][Y]),2)+pow((double)(zn-pos[coldex][Z]),2);
dis = (float)sqrt((double)sq);
//Unit vector along the collision normal
rvec[X]=(pos[coldex][X]-xn)/dis;
rvec[Y]=(pos[coldex][Y]-yn)/dis;
rvec[Z]=(pos[coldex][Z]-zn)/dis;
//Normal component of this particle's velocity
dotV=rvec[X]*vl_buff[X]+rvec[Y]*vl_buff[Y]+rvec[Z]*vl_buff[Z];
Vl[X]=dotV*rvec[X];
Vl[Y]=dotV*rvec[Y];
Vl[Z]=dotV*rvec[Z];
//Normal component of the partner's velocity
dotV=rvec[X]*vec[coldex][X]+rvec[Y]*vec[coldex][Y]+rvec[Z]*vec[coldex][Z];
Vr[X]=dotV*rvec[X];
Vr[Y]=dotV*rvec[Y];
Vr[Z]=dotV*rvec[Z];
//Tangential component of this particle's velocity
Vh[X]=vl_buff[X]-Vl[X];
Vh[Y]=vl_buff[Y]-Vl[Y];
Vh[Z]=vl_buff[Z]-Vl[Z];
//Use the smaller coefficient of restitution
repul=e[index];
if (e[coldex] < e[index]) {
repul=e[coldex];
}
//Velocity update
vcol_buff[X]=Vh[X]+((1+repul)*Vr[X]+(1-repul)*Vl[X])/2;
vcol_buff[Y]=Vh[Y]+((1+repul)*Vr[Y]+(1-repul)*Vl[Y])/2;
vcol_buff[Z]=Vh[Z]+((1+repul)*Vr[Z]+(1-repul)*Vl[Z])/2;
//Partner's post-collision velocity
vr_buff[X]=vec[coldex][X]-Vr[X]+((1+repul)*Vl[X]+(1-repul)*Vr[X])/2;
vr_buff[Y]=vec[coldex][Y]-Vr[Y]+((1+repul)*Vl[Y]+(1-repul)*Vr[Y])/2;
vr_buff[Z]=vec[coldex][Z]-Vr[Z]+((1+repul)*Vl[Z]+(1-repul)*Vr[Z])/2;
//Split the collision energy by the viscosity ratio and convert it to heat
double Energy=0.5*M*(pow(vec[coldex][X],2)+pow(vec[coldex][Y],2)+pow(vec[coldex][Z],2)+pow(vl_buff[X],2)+pow(vl_buff[Y],2)+pow(vl_buff[Z],2) - (pow(vcol_buff[X],2)+pow(vcol_buff[Y],2)+pow(vcol_buff[Z],2)+pow(vr_buff[X],2)+pow(vr_buff[Y],2)+pow(vr_buff[Z],2))) / pow(scale,2) * 1000000;
J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1);
//Cap the temperature at 10,000,000 degrees
if (J[index] > M * cap * 10000000){
J[index] = M * cap * 10000000;
}
vl_buff[X]=vcol_buff[X];
vl_buff[Y]=vcol_buff[Y];
vl_buff[Z]=vcol_buff[Z];
//Update viscosity and restitution: restitution falls linearly with temperature; viscosity drops one order of magnitude per 100-degree rise
e[index] = 1 - ((1-ref)/temp * J[index]/M/cap);
if ( e[index] < 0 ){ e[index] = 0; }
if ( e[index] > 1 ){ e[index] = 1; }
sti[index] = visc - ((J[index]/M/cap - temp) / 100);
}
v_buff[index][X]=vl_buff[X];
v_buff[index][Y]=vl_buff[Y];
v_buff[index][Z]=vl_buff[Z];
}
//Radiative cooling
J[index]-=cool*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*ANIM;
//Never drop below absolute zero
if (J[index] < 0) {
J[index] = 0;
}
}
//Compute velocity after gravitational influence
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float gravity=0;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum==0){
//With no collision, receive gravity from every other particle
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if (i!=index) {
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
vx += ((pos[i][X]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][Y]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][Z]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else if(colnum <= 12){//hexagonal close packing
//With collisions, receive gravity only from particles other than itself and its collision partners
vx = v_buff[index][X];
vy = v_buff[index][Y];
vz = v_buff[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
if(dis > 2 * R) {
vx += ((pos[i][X]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][Y]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][Z]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else{
vx = v_buff[index][X];
vy = v_buff[index][Y];
vz = v_buff[index][Z];
}
v_buff[index][X] = vx;
v_buff[index][Y] = vy;
v_buff[index][Z] = vz;
}
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
vec[index][X]=v_buff[index][X];
vec[index][Y]=v_buff[index][Y];
vec[index][Z]=v_buff[index][Z];
}
//Clear buffers
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
for (int i=0; i < 3; i++){
v_buff[index][i]=0;
p_buff[index][i]=0;
}
for (int i=0; i < NUM_POINTS; i++){
coltime[index][i]=0;
colindex[index][i]=NUM_POINTS;
}
}
//Update positions after gravitational influence
__global__ void grav_p(float(*pos)[3], float(*vec)[3])
{
float xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
pos[index][X] = xn + vx * ANIM;
pos[index][Y] = yn + vy * ANIM;
pos[index][Z] = zn + vz * ANIM;
}
// Place particles at their initial positions.
void setInitialPosition(void)
{
for (int i = 0; i < NUM_POINTS; i++) {
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ;
v_point[i][j] = 0;
hv_buff[i][j] = 0;
hp_buff[i][j] = 0;
}
e_point[i]=ref;
J_point[i]=cap*M*temp;
/*
int earth_points = NUM_POINTS - (NUM_POINTS/64);
if(i < earth_points){
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/2 ;
v_point[i][j] = 0;
hv_buff[i][j] = 0;
}
e_point[i]=ref;
J_point[i]=cap*M*temp;
}
else {
h_point[i][X] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_X;
h_point[i][Y] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_Y;
h_point[i][Z] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_Z;
v_point[i][X] = -(MOONOFFSET_X*scale/ANIM)/4.5;
v_point[i][Y] = -(MOONOFFSET_Y*scale/ANIM)/5;
v_point[i][Z] = -(MOONOFFSET_Z*scale/ANIM)/5;
for (int j = 0 ; j < 3 ; j++){
hv_buff[i][j] = 0;
}
e_point[i]=0;
J_point[i]=cap*M*temp*10;
}
*/
st_point[i]=visc;
for (int j = 0; j < NUM_POINTS; j++) {
colsyncindex[i][j]=NUM_POINTS;
}
}
checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&p_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dst_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&de_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int)));
checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(v_buff, hv_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(p_buff, hp_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyHostToDevice));
}
//CUDA launch function
void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
hipLaunchKernelGGL(( grav_coldetect), dim3(grid) , dim3(block), 0, 0, pos, vec,coltime,colindex,p_buff);
hipLaunchKernelGGL(( grav_padjust), dim3(grid) , dim3(block), 0, 0, pos, p_buff);
hipLaunchKernelGGL(( grav_colv), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,sti,e,J,coltime,colindex);
hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,colindex);
hipLaunchKernelGGL(( grav_vupdate), dim3(grid) , dim3(block), 0, 0, vec,v_buff);
hipLaunchKernelGGL(( buff_clear), dim3(grid) , dim3(block), 0, 0, v_buff,coltime,colindex,p_buff);
hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos,vec);
}
//Animation step
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex,p_buff);
checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(hv_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(hp_buff, p_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyDeviceToHost));
anim_time += anim_dt;
}
//View definition
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
// Set the eye position.
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
// Define a coordinate system with the eye at the origin.
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void metaball (float pos[3], float color[3]) {
double margin=0;
double view[3]={0};
double TH=theta;
double PH=-phi;
for (int i = 0 ; i < dev_points ; i ++)
{
view[X] = 0;
view[Y] = R * cos(i * PI * 2 / dev);
view[Z] = R * sin(i * PI * 2 / dev);
//Polar-coordinate transform
point[i][X] = view[X] * cos(TH * PI / 180) * cos(PH * PI / 180) + view[Y] * sin(PH * PI / 180) - view[Z] * sin(TH * PI / 180) * cos(PH * PI / 180);
point[i][Y] = - view[X] * cos(TH * PI / 180) * sin(PH * PI / 180) + view[Y] * cos(PH * PI / 180) + view[Z] * sin(TH * PI / 180) * sin(PH * PI / 180);
point[i][Z] = view[X] * sin(TH * PI / 180) + view[Z] * cos(TH * PI / 180);
}
//Draw the core as a disc and rotate it toward the viewer so it reads as a sphere
glBegin(GL_TRIANGLE_FAN);
glColor4f(1,1,1,0.3);
glVertex3d(pos[X],pos[Y],pos[Z]);
for (int i = 0 ; i < dev_points ; i ++)
{
glVertex3d(point[i][X] + pos[X], point[i][Y] + pos[Y], point[i][Z] + pos[Z]);
}
glEnd();
//Surrounding halo
int mh[dev_points];
for (int i = 0 ; i < dev_points ; i ++)
{
mh[i]=1;
}
glBegin(GL_POINTS);
glColor4f(color[X],color[Y],color[Z],0.1);
for (int k = 0; k < hollow; k++) {
margin=0.5/hollow*k+1;
for (int i = 0 ; i < dev_points ; i ++)
{
if((mh[i]==1 || (i > 0 && mh[i-1]==1) || (i + 1 < dev_points && mh[i+1]==1)) && (rand() % dev) < (dev * (hollow-k/2)/hollow))
glVertex3d(margin*point[i][X] + pos[X], margin*point[i][Y] + pos[Y], margin*point[i][Z] + pos[Z]);
else
mh[i]=0;
}
}
glEnd();
}
void display(void)
{
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 0.0f;
//Start the CUDA step
runGPUKernel();
// Light source setup
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Metaballs
float color[3]={0};
for (int k = 0 ; k < NUM_POINTS ; k++)
{
//Color varies with temperature
if(J_point[k]/M/cap-temp < resol){
color[X]=1.0;
color[Y]=1.0;
color[Z]=1.0-(J_point[k]/M/cap-temp)/resol;
}
else if(J_point[k]/M/cap-temp < 2 * resol){
color[X]=1.0;
color[Y]=1.0-(J_point[k]/M/cap-temp-resol)/resol;
color[Z]=0.0;
}
else {
color[X]=1.0;
color[Y]=0.0;
color[Z]=0.0;
}
metaball(h_point[k],color);
}
glutSwapBuffers();
glutPostRedisplay();
}
void mouse_button(int button, int state, int x, int y)
{
if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON))
motion_p = true;
if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON))
motion_w = true;
else if (state == GLUT_UP) {
motion_p = false;
motion_w = false;
}
mouse_old_x = x;
mouse_old_y = y;
}
void mouse_motion(int x, int y)
{
int dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (motion_p) {
phi -= dx * 0.2;
theta += dy * 0.2;
}
if (motion_w) {
right_motion += dx / 10;
up_motion -= dy / 10;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void resize(int width, int height)
{
window_width = width;
window_height = height;
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case 'q':
case 'Q':
case '\033':
exit(0);
default:
break;
}
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
point = (double **)malloc(sizeof(double *) * dev_points);
for (int i = 0 ; i < dev_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
glutInit(&argc, argv);
//glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
hipFree(dst_point);
hipFree(de_point);
hipFree(dJ_point);
//Free only the host-side circle-point buffers here; the device arrays are single
//allocations and are released once below
for (int i = 0 ; i < dev_points ; i++)
{
free (point[i]);
}
free (point);
hipFree(d_point);
hipFree(dv_point);
hipFree(v_buff);
hipFree(p_buff);
hipFree(dcolsynctime);
hipFree(dcolsyncindex);
hipDeviceReset();
return 0;
}
|
9a1822d72e53ad963e88485ce6e02847428cae56.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <GL/freeglut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
//TBD
//Make collision angle and distance adjustable with sliders
#define PI 3.141592653589793
//Physical parameters
#define cap 1000
#define ref 0.9//0.5
#define temp 4000
#define visc 9
#define GRAV (6.674*0.00000000000000000001)
#define density (2.5 * 1000000000000)
#define sigma (0.96*5.67*0.00000001) //W/m^2 T^4
#define cool (sigma*4*PI*rad*rad*1000000*10)
//Particle geometry
#define rad 300//40 //km
#define M (4 / 3 * PI * rad*rad*rad* density)//kg
//Rendering settings
#define MOONOFFSET_X (INIT_WIDTH/vision*2)
#define MOONOFFSET_Y (INIT_WIDTH/vision*3)
#define MOONOFFSET_Z (INIT_HEIGHT/vision*3)
#define dev 360//12
#define resol 10
#define hollow 100//10
#define X 0
#define Y 1
#define Z 2
#define ANIM_START 0
#define ANIM 20//10
#define scale 0.01
#define colmargin 1.0001//1.0001
#define adjust 0.999
#define R (rad * scale)
#define INIT_WIDTH 800
#define INIT_HEIGHT 800
#define vision 40
#define Grid_x 2//blocks cannot be synchronized with __syncthreads
#define Grid_y 2
#define Grid_z 1
#define Block_x 2//32
#define Block_y 2//16
#define Block_z 2//1
#define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z)
unsigned int dev_points = dev + 1;
unsigned int window_width = INIT_WIDTH;
unsigned int window_height = INIT_HEIGHT;
double vision_size = vision;
float right_motion=0;
float up_motion=0;
double left, right, bottom, top;
float h_point[NUM_POINTS][3]={0};
float v_point[NUM_POINTS][3]={0};
float st_point[NUM_POINTS]={0};
float e_point[NUM_POINTS]={0};
float J_point[NUM_POINTS]={0};
float hv_buff[NUM_POINTS][3]={0};
float hp_buff[NUM_POINTS][3]={0};
float anim_time = ANIM_START;
float anim_dt = ANIM;
double phi = 30.0;
double theta = 30.0;
float light_pos[4]={0};
int mouse_old_x, mouse_old_y;
bool motion_p;
bool motion_w;
double eye[3]={0};
double center[3] = {0.0, 0.0, 0.0};
double up[3]={0};
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
float (*dst_point);
float (*de_point);
float (*dJ_point);
float (*v_buff)[3];
float (*p_buff)[3];
float colsynctime[NUM_POINTS][NUM_POINTS]={0};
int colsyncindex[NUM_POINTS][NUM_POINTS]={0};
float (*dcolsynctime)[NUM_POINTS];
int (*dcolsyncindex)[NUM_POINTS];
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3]);
__global__ void grav_padjust(float(*pos)[3], float(*p_buff)[3]);
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]);
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]);
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3]);
__global__ void grav_p(float (*pos)[3], float(*vec)[3]);
//Basic utility functions
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
//Collision detection
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
float xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
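//Flatten the 3D grid/block thread coordinates into a single particle index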
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
float rvec[3]={0};
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
p_buff[index][X]=xn;
p_buff[index][Y]=yn;
p_buff[index][Z]=zn;
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
dis = (float)sqrt((double)sq);
rvec[X]=(pos[i][X]-xn)/dis;
rvec[Y]=(pos[i][Y]-yn)/dis;
rvec[Z]=(pos[i][Z]-zn)/dis;
//Check whether the particle has entered the collision zone
if (dis > 2 * R && i != index)
{
colindex[index][i]=NUM_POINTS;
}
else if (dis <= 2 * R && i != index)
{
//Record the elapsed time since entering the collision zone
colindex[index][i]=i;
coltime[index][i]=(2*R-dis)/((vx-vec[i][X])*rvec[X]+(vy-vec[i][Y])*rvec[Y]+(vz-vec[i][Z])*rvec[Z]);
//Position correction
if(dis <= 2 * R * adjust)
{
p_buff[index][X]+=(p_buff[index][X]-pos[i][X])/dis*(2*R*colmargin-dis);
p_buff[index][Y]+=(p_buff[index][Y]-pos[i][Y])/dis*(2*R*colmargin-dis);
p_buff[index][Z]+=(p_buff[index][Z]-pos[i][Z])/dis*(2*R*colmargin-dis);
}
}
else
{
colindex[index][i]=NUM_POINTS;
}
}
}
//Correct pairs whose center-to-center distance has dropped below one diameter
__global__ void grav_padjust(float(*pos)[3], float(*p_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
pos[index][X]=p_buff[index][X];
pos[index][Y]=p_buff[index][Y];
pos[index][Z]=p_buff[index][Z];
}
//Compute post-collision velocities
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float tmptime=0;
int tmpindex=0;
int coldex=0;
float repul=0;
float rvec[3]={0};
float Vl[3]={0};
float Vr[3]={0};
float Vh[3]={0};
float vl_buff[3]={0};
float vr_buff[3]={0};
float vcol_buff[3]={0};
float dotV=0;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vl_buff[X]=vec[index][X];
vl_buff[Y]=vec[index][Y];
vl_buff[Z]=vec[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum>0){
//Index-sort collisions by elapsed time since entering the collision zone
for(int i = 0 ; i < NUM_POINTS; i++){
for(int j = i+1; j < NUM_POINTS; j++){
if(coltime[index][i] > coltime[index][j]){
tmptime=coltime[index][i];
tmpindex=colindex[index][i];
coltime[index][i]=coltime[index][j];
colindex[index][i]=colindex[index][j];
coltime[index][j]=tmptime;
colindex[index][j]=tmpindex;
}
}
}
//Process collisions starting with the longest elapsed time
for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){
coldex=colindex[index][i];
sq = (float)pow((double)(xn-pos[coldex][X]),2)+pow((double)(yn-pos[coldex][Y]),2)+pow((double)(zn-pos[coldex][Z]),2);
dis = (float)sqrt((double)sq);
//Unit vector along the collision normal
rvec[X]=(pos[coldex][X]-xn)/dis;
rvec[Y]=(pos[coldex][Y]-yn)/dis;
rvec[Z]=(pos[coldex][Z]-zn)/dis;
//Normal component of this particle's velocity
dotV=rvec[X]*vl_buff[X]+rvec[Y]*vl_buff[Y]+rvec[Z]*vl_buff[Z];
Vl[X]=dotV*rvec[X];
Vl[Y]=dotV*rvec[Y];
Vl[Z]=dotV*rvec[Z];
//Normal component of the partner's velocity
dotV=rvec[X]*vec[coldex][X]+rvec[Y]*vec[coldex][Y]+rvec[Z]*vec[coldex][Z];
Vr[X]=dotV*rvec[X];
Vr[Y]=dotV*rvec[Y];
Vr[Z]=dotV*rvec[Z];
//Tangential component of this particle's velocity
Vh[X]=vl_buff[X]-Vl[X];
Vh[Y]=vl_buff[Y]-Vl[Y];
Vh[Z]=vl_buff[Z]-Vl[Z];
//Use the smaller coefficient of restitution
repul=e[index];
if (e[coldex] < e[index]) {
repul=e[coldex];
}
//Velocity update
vcol_buff[X]=Vh[X]+((1+repul)*Vr[X]+(1-repul)*Vl[X])/2;
vcol_buff[Y]=Vh[Y]+((1+repul)*Vr[Y]+(1-repul)*Vl[Y])/2;
vcol_buff[Z]=Vh[Z]+((1+repul)*Vr[Z]+(1-repul)*Vl[Z])/2;
//Partner's post-collision velocity
vr_buff[X]=vec[coldex][X]-Vr[X]+((1+repul)*Vl[X]+(1-repul)*Vr[X])/2;
vr_buff[Y]=vec[coldex][Y]-Vr[Y]+((1+repul)*Vl[Y]+(1-repul)*Vr[Y])/2;
vr_buff[Z]=vec[coldex][Z]-Vr[Z]+((1+repul)*Vl[Z]+(1-repul)*Vr[Z])/2;
//Split the collision energy by the viscosity ratio and convert it to heat
double Energy=0.5*M*(pow(vec[coldex][X],2)+pow(vec[coldex][Y],2)+pow(vec[coldex][Z],2)+pow(vl_buff[X],2)+pow(vl_buff[Y],2)+pow(vl_buff[Z],2) - (pow(vcol_buff[X],2)+pow(vcol_buff[Y],2)+pow(vcol_buff[Z],2)+pow(vr_buff[X],2)+pow(vr_buff[Y],2)+pow(vr_buff[Z],2))) / pow(scale,2) * 1000000;
J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1);
//Cap the temperature at 10,000,000 degrees
if (J[index] > M * cap * 10000000){
J[index] = M * cap * 10000000;
}
vl_buff[X]=vcol_buff[X];
vl_buff[Y]=vcol_buff[Y];
vl_buff[Z]=vcol_buff[Z];
//Update viscosity and restitution: restitution falls linearly with temperature; viscosity drops one order of magnitude per 100-degree rise
e[index] = 1 - ((1-ref)/temp * J[index]/M/cap);
if ( e[index] < 0 ){ e[index] = 0; }
if ( e[index] > 1 ){ e[index] = 1; }
sti[index] = visc - ((J[index]/M/cap - temp) / 100);
}
v_buff[index][X]=vl_buff[X];
v_buff[index][Y]=vl_buff[Y];
v_buff[index][Z]=vl_buff[Z];
}
//Radiative cooling
J[index]-=cool*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*(J[index]/M/cap)*ANIM;
//Never drop below absolute zero
if (J[index] < 0) {
J[index] = 0;
}
}
//Compute velocity after gravitational influence
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float gravity=0;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum==0){
//With no collision, receive gravity from every other particle
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
if (i!=index) {
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
vx += ((pos[i][X]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][Y]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][Z]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else if(colnum <= 12){//hexagonal close packing
//With collisions, receive gravity only from particles other than itself and its collision partners
vx = v_buff[index][X];
vy = v_buff[index][Y];
vz = v_buff[index][Z];
for (int i = 0 ; i < NUM_POINTS; i++){
sq = (float)pow((double)(xn-pos[i][X]),2) + pow((double)(yn-pos[i][Y]),2) + pow((double)(zn-pos[i][Z]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
if(dis > 2 * R) {
vx += ((pos[i][X]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][Y]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][Z]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else{
vx = v_buff[index][X];
vy = v_buff[index][Y];
vz = v_buff[index][Z];
}
v_buff[index][X] = vx;
v_buff[index][Y] = vy;
v_buff[index][Z] = vz;
}
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
vec[index][X]=v_buff[index][X];
vec[index][Y]=v_buff[index][Y];
vec[index][Z]=v_buff[index][Z];
}
//Clear buffers
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
for (int i=0; i < 3; i++){
v_buff[index][i]=0;
p_buff[index][i]=0;
}
for (int i=0; i < NUM_POINTS; i++){
coltime[index][i]=0;
colindex[index][i]=NUM_POINTS;
}
}
//Update positions after gravitational influence
__global__ void grav_p(float(*pos)[3], float(*vec)[3])
{
float xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][X];
yn = pos[index][Y];
zn = pos[index][Z];
vx = vec[index][X];
vy = vec[index][Y];
vz = vec[index][Z];
pos[index][X] = xn + vx * ANIM;
pos[index][Y] = yn + vy * ANIM;
pos[index][Z] = zn + vz * ANIM;
}
// Place particles at their initial positions.
void setInitialPosition(void)
{
for (int i = 0; i < NUM_POINTS; i++) {
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ;
v_point[i][j] = 0;
hv_buff[i][j] = 0;
hp_buff[i][j] = 0;
}
e_point[i]=ref;
J_point[i]=cap*M*temp;
/*
int earth_points = NUM_POINTS - (NUM_POINTS/64);
if(i < earth_points){
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/2 ;
v_point[i][j] = 0;
hv_buff[i][j] = 0;
}
e_point[i]=ref;
J_point[i]=cap*M*temp;
}
else {
h_point[i][X] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_X;
h_point[i][Y] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_Y;
h_point[i][Z] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision/6 + MOONOFFSET_Z;
v_point[i][X] = -(MOONOFFSET_X*scale/ANIM)/4.5;
v_point[i][Y] = -(MOONOFFSET_Y*scale/ANIM)/5;
v_point[i][Z] = -(MOONOFFSET_Z*scale/ANIM)/5;
for (int j = 0 ; j < 3 ; j++){
hv_buff[i][j] = 0;
}
e_point[i]=0;
J_point[i]=cap*M*temp*10;
}
*/
st_point[i]=visc;
for (int j = 0; j < NUM_POINTS; j++) {
colsyncindex[i][j]=NUM_POINTS;
}
}
checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&p_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dst_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&de_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int)));
checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(v_buff, hv_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(p_buff, hp_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyHostToDevice));
}
//CUDA launch function
void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS],float(*p_buff)[3])
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
grav_coldetect<<<grid , block>>>(pos, vec,coltime,colindex,p_buff);
grav_padjust<<<grid , block>>>(pos, p_buff);
grav_colv<<<grid , block>>>(pos,vec,v_buff,sti,e,J,coltime,colindex);
grav_v<<<grid , block>>>(pos,vec,v_buff,colindex);
grav_vupdate<<<grid , block>>>(vec,v_buff);
buff_clear<<<grid , block>>>(v_buff,coltime,colindex,p_buff);
grav_p<<<grid , block>>>(pos,vec);
}
//Animation step
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex,p_buff);
checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(hv_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(hp_buff, p_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyDeviceToHost));
anim_time += anim_dt;
}
//View definition
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
// Set the eye position.
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
// Define a coordinate system with the eye at the origin.
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void metaball (float pos[3], float color[3]) {
double margin=0;
double view[3]={0};
double TH=theta;
double PH=-phi;
for (int i = 0 ; i < dev_points ; i ++)
{
view[X] = 0;
view[Y] = R * cos(i * PI * 2 / dev);
view[Z] = R * sin(i * PI * 2 / dev);
//Polar-coordinate transform
point[i][X] = view[X] * cos(TH * PI / 180) * cos(PH * PI / 180) + view[Y] * sin(PH * PI / 180) - view[Z] * sin(TH * PI / 180) * cos(PH * PI / 180);
point[i][Y] = - view[X] * cos(TH * PI / 180) * sin(PH * PI / 180) + view[Y] * cos(PH * PI / 180) + view[Z] * sin(TH * PI / 180) * sin(PH * PI / 180);
point[i][Z] = view[X] * sin(TH * PI / 180) + view[Z] * cos(TH * PI / 180);
}
//Draw the core as a disc and rotate it toward the viewer so it reads as a sphere
glBegin(GL_TRIANGLE_FAN);
glColor4f(1,1,1,0.3);
glVertex3d(pos[X],pos[Y],pos[Z]);
for (int i = 0 ; i < dev_points ; i ++)
{
glVertex3d(point[i][X] + pos[X], point[i][Y] + pos[Y], point[i][Z] + pos[Z]);
}
glEnd();
//Surrounding halo
int mh[dev_points];
for (int i = 0 ; i < dev_points ; i ++)
{
mh[i]=1;
}
glBegin(GL_POINTS);
glColor4f(color[X],color[Y],color[Z],0.1);
for (int k = 0; k < hollow; k++) {
margin=0.5/hollow*k+1;
for (int i = 0 ; i < dev_points ; i ++)
{
if((mh[i]==1 || (i > 0 && mh[i-1]==1) || (i + 1 < dev_points && mh[i+1]==1)) && (rand() % dev) < (dev * (hollow-k/2)/hollow))
glVertex3d(margin*point[i][X] + pos[X], margin*point[i][Y] + pos[Y], margin*point[i][Z] + pos[Z]);
else
mh[i]=0;
}
}
glEnd();
}
void display(void)
{
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 0.0f;
//Start the CUDA step
runGPUKernel();
// Light source setup
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Metaballs
float color[3]={0};
for (int k = 0 ; k < NUM_POINTS ; k++)
{
//Color varies with temperature
if(J_point[k]/M/cap-temp < resol){
color[X]=1.0;
color[Y]=1.0;
color[Z]=1.0-(J_point[k]/M/cap-temp)/resol;
}
else if(J_point[k]/M/cap-temp < 2 * resol){
color[X]=1.0;
color[Y]=1.0-(J_point[k]/M/cap-temp-resol)/resol;
color[Z]=0.0;
}
else {
color[X]=1.0;
color[Y]=0.0;
color[Z]=0.0;
}
metaball(h_point[k],color);
}
glutSwapBuffers();
glutPostRedisplay();
}
void mouse_button(int button, int state, int x, int y)
{
if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON))
motion_p = true;
if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON))
motion_w = true;
else if (state == GLUT_UP) {
motion_p = false;
motion_w = false;
}
mouse_old_x = x;
mouse_old_y = y;
}
void mouse_motion(int x, int y)
{
int dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (motion_p) {
phi -= dx * 0.2;
theta += dy * 0.2;
}
if (motion_w) {
right_motion += dx / 10;
up_motion -= dy / 10;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void resize(int width, int height)
{
window_width = width;
window_height = height;
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case 'q':
case 'Q':
case '\033':
exit(0);
default:
break;
}
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
point = (double **)malloc(sizeof(double *) * dev_points);
for (int i = 0 ; i < dev_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
glutInit(&argc, argv);
//glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
cudaFree(dst_point);
cudaFree(de_point);
cudaFree(dJ_point);
//Free only the host-side circle-point buffers here; the device arrays are single
//allocations and are released once below
for (int i = 0 ; i < dev_points ; i++)
{
free (point[i]);
}
free (point);
cudaFree(d_point);
cudaFree(dv_point);
cudaFree(v_buff);
cudaFree(p_buff);
cudaFree(dcolsynctime);
cudaFree(dcolsyncindex);
cudaDeviceReset();
return 0;
}
|
1f5a6766057f84ce99d61c69fa01929cfce49d51.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .optix.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// function definition helper
#define LH2_DEVFUNC static __forceinline__ __device__
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_functions.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapted from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
LH2_DEVFUNC float4 blueNoiseSampler4( int x, int y, int sampleIndex, int sampleDimension )
{
// Optimized retrieval of 4 blue noise samples.
const uint4 bn4 = *((uint4*)(params.blueNoise + sampleDimension + (x + y * 128) * 8 + 65536 * 3));
const int rsi1 = (sampleIndex ^ bn4.x) & 255, rsi2 = (sampleIndex ^ bn4.y) & 255;
const int rsi3 = (sampleIndex ^ bn4.z) & 255, rsi4 = (sampleIndex ^ bn4.w) & 255;
const int v1 = params.blueNoise[sampleDimension + 0 + rsi1 * 256];
const int v2 = params.blueNoise[sampleDimension + 1 + rsi2 * 256];
const int v3 = params.blueNoise[sampleDimension + 2 + rsi3 * 256];
const int v4 = params.blueNoise[sampleDimension + 3 + rsi4 * 256];
const uint4 bx4 = *((uint4*)(params.blueNoise + (sampleDimension & 7) + (x + y * 128) * 8 + 65536));
return make_float4( (0.5f + (v1 ^ bx4.x)) * (1.0f / 256.0f), (0.5f + (v2 ^ bx4.y)) * (1.0f / 256.0f),
(0.5f + (v3 ^ bx4.z)) * (1.0f / 256.0f), (0.5f + (v4 ^ bx4.w)) * (1.0f / 256.0f) );
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
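// Pick one of nine aperture blades from r0, then sample a uniform point inside that triangular blade of the lens.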
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
int shift = params.shift;
float4 r4;
if (sampleIdx < 64)
{
r4 = blueNoiseSampler4( (sx + (shift & 127)) & 127, (sy + (shift >> 24)) & 127, sampleIdx, 0 );
}
else
{
r4.x = RandomFloat( seed ), r4.y = RandomFloat( seed );
r4.z = RandomFloat( seed ), r4.w = RandomFloat( seed );
}
O = RandomPointOnLens( r4.x, r4.z );
float3 posOnPixel = RayTarget( sx, sy, r4.y, r4.w, make_int2( params.scrsize ), params.distortion, params.p1, params.right, params.up );
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
const uint rayIdx = idx.x + idx.y * params.scrsize.x;
switch (params.phase)
{
case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF
|
1f5a6766057f84ce99d61c69fa01929cfce49d51.cu
|
/* .optix.cu - Copyright 2019/2020 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// function definition helper
#define LH2_DEVFUNC static __forceinline__ __device__
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_functions.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapted from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
LH2_DEVFUNC float4 blueNoiseSampler4( int x, int y, int sampleIndex, int sampleDimension )
{
// Optimized retrieval of 4 blue noise samples.
const uint4 bn4 = *((uint4*)(params.blueNoise + sampleDimension + (x + y * 128) * 8 + 65536 * 3));
const int rsi1 = (sampleIndex ^ bn4.x) & 255, rsi2 = (sampleIndex ^ bn4.y) & 255;
const int rsi3 = (sampleIndex ^ bn4.z) & 255, rsi4 = (sampleIndex ^ bn4.w) & 255;
const int v1 = params.blueNoise[sampleDimension + 0 + rsi1 * 256];
const int v2 = params.blueNoise[sampleDimension + 1 + rsi2 * 256];
const int v3 = params.blueNoise[sampleDimension + 2 + rsi3 * 256];
const int v4 = params.blueNoise[sampleDimension + 3 + rsi4 * 256];
const uint4 bx4 = *((uint4*)(params.blueNoise + (sampleDimension & 7) + (x + y * 128) * 8 + 65536));
return make_float4( (0.5f + (v1 ^ bx4.x)) * (1.0f / 256.0f), (0.5f + (v2 ^ bx4.y)) * (1.0f / 256.0f),
(0.5f + (v3 ^ bx4.z)) * (1.0f / 256.0f), (0.5f + (v4 ^ bx4.w)) * (1.0f / 256.0f) );
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
int shift = params.shift;
float4 r4;
if (sampleIdx < 64)
{
r4 = blueNoiseSampler4( (sx + (shift & 127)) & 127, (sy + (shift >> 24)) & 127, sampleIdx, 0 );
}
else
{
r4.x = RandomFloat( seed ), r4.y = RandomFloat( seed );
r4.z = RandomFloat( seed ), r4.w = RandomFloat( seed );
}
O = RandomPointOnLens( r4.x, r4.z );
float3 posOnPixel = RayTarget( sx, sy, r4.y, r4.w, make_int2( params.scrsize ), params.distortion, params.p1, params.right, params.up );
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
const uint rayIdx = idx.x + idx.y * params.scrsize.x;
switch (params.phase)
{
case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF
|
7c219ce882314636396c994be6c3a737c7d78f89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//CUDA implementation of k-means using DTW
//Author: Jeffrey Wong
//Last Modified: 27 May 2011
//Status: Working
//To Do: Code decomposition. Want to write DTW, kmeans, and tsdist as separate
//files. Problem: NVCC does not allow a kernel to use a device function from
//another file. How can we have the kmeans kernel refer to a DTW file?
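//Note: with relocatable device code (e.g. nvcc -rdc=true, or hipcc -fgpu-rdc), a __device__
//function defined in another source file can be declared in a header and linked into the
//kernel, so the DTW/kmeans/tsdist split should be possible on newer toolchains.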
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#include<math.h>
#include<cuda.h>
#include"cuPrintf.cu"
//////
//MISC
//////
int checkCUDAError()
{
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("Error during initialization: %s\n", hipGetErrorString(error));
return -1;
}
return 1;
}
/////////////
//DTW HELPERS
/////////////
__host__ __device__ float min(float x, float y, float z)
{
float smallest = x;
if(smallest > y)
smallest = y;
if(smallest > z)
smallest = z;
return smallest;
}
//local distance for dtw
__host__ __device__ float distance(float x, float y)
{
if(x > y)
return (x-y);
else return (y-x);
}
//////////
//CUDA DTW
//////////
__device__ float cudaDTW(float* x, float* y, int length, float* DTW, int index)
{
int n = length+1;
float* dtw = DTW + index*n;
for(int i = 0; i < n; i++)
{
dtw[i] = 0;
}
float cost;
float next;
for(int i=0; i < n; i++)
{
dtw[i] = 9999;
}
float prev = 9999;
dtw[0] = 0;
for(int i = 0; i < length; i++)
{
for(int j = 0; j < length; j++)
{
cost = distance(x[i], y[j]);
next = cost + min(dtw[j+1], prev, dtw[j]);
if(i == length - 1 && j == length-1)
{
return next;
}
dtw[j+1] = next;
dtw[j] = prev;
prev = next;
}
}
return next;
}
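//The rolling update above is a single-row variant of the usual DTW recurrence,
// D(i,j) = d(x_i, y_j) + min( D(i-1,j), D(i-1,j-1), D(i,j-1) ),
//with "prev" carrying the entry to the left in the current row and dtw[] holding the previous row,
//so each comparison needs only O(length) scratch space per thread (hence the per-thread slice of DTW).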
float DTW(float* x, float* y, int length)
{
int n = (length+1);
float* DTW = (float*) malloc(n * sizeof(float));
if(DTW == NULL) { return -1; }
for(int i = 0; i < n; i++)
{
DTW[i] = 0;
}
float cost;
float next;
for(int i=0; i < n; i++)
{
DTW[i] = 9999;
}
float prev = 9999;
DTW[0] = 0;
for(int i = 0; i < length; i++)
{
for(int j = 0; j < length; j++)
{
cost = distance(x[i], y[j]);
next = cost + min(DTW[j+1], prev, DTW[j]);
DTW[j+1] = next;
DTW[j] = prev;
prev = next;
}
}
free(DTW);
return next;
}
////////////////
//KMEANS KERNELS
////////////////
__global__ void cudaKMeansDistances(float* d_data, float* d_centers, float* DTW, int numTS, int ts_length, int k, int* d_clusters)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= numTS) return;
float* ts = d_data + i*ts_length;
float shortestDistance = cudaDTW(ts, d_centers, ts_length, DTW, i);
int shortestDistanceIndex = 0;
for(int j = 1; j < k; j++)
{
float* center = d_centers + j*ts_length;
float distance = cudaDTW(ts, center, ts_length, DTW, i);
if(distance < shortestDistance)
{
shortestDistance = distance;
shortestDistanceIndex = j;
}
}
d_clusters[i] = shortestDistanceIndex;
}
__global__ void cudaKMeansCenters(float* d_data, float* d_centers, int numTS, int ts_length, int k, int* d_clusters)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= ts_length) return;
extern __shared__ int clusterSize[];
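// clusterSize[] is the dynamically sized shared allocation passed at launch time (cudaRKMeans passes
// k * sizeof(float) bytes, which happens to match the k ints needed here). Thread 0 counts the members
// of each cluster; note that an empty cluster leaves clusterSize[j] == 0 and the averaging below then
// divides by zero.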
if(threadIdx.x == 0)
{
for(int j = 0; j < k; j++)
{
clusterSize[j] = 0;
}
for(int j = 0; j < numTS; j++)
{
int cluster = d_clusters[j];
clusterSize[cluster] = clusterSize[cluster] + 1;
}
}
__syncthreads();
for(int j = 0; j < k; j++)
{
float* center = d_centers + j*ts_length;
center[i] = 0;
}
for(int j = 0; j < numTS; j++)
{
float* ts = d_data + j*ts_length;
int cluster = d_clusters[j];
float* center = d_centers + cluster*ts_length;
center[i] = center[i] + ts[i];
}
for(int j = 0; j < k; j++)
{
float* center = d_centers + j*ts_length;
center[i] = center[i] / clusterSize[j];
}
}
////////////////////
//R KMEANS WITH CUDA
////////////////////
//Entry point for R to run cudaKMeans
extern "C" void cudaRKMeans(float* data, float* centers, int* numTSR, int* ts_lengthR, int* kR, int* clusters, float* withinss, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
int k = kR[0];
float* d_data;
float* d_centers;
int* d_clusters;
float* dtw;
if(hipSuccess != hipMalloc( (void**)&d_data, numTS * ts_length * sizeof(float)))
{
printf("Could not hipMalloc data\n");
success[0] = -1;
return;
}
if(hipSuccess != hipMalloc( (void**)&d_centers, k * ts_length * sizeof(float)))
{
printf("Could not hipMalloc centers\n");
hipFree(d_data);
success[0] = -1;
return;
}
if(hipSuccess != hipMalloc( (void**)&d_clusters, numTS * sizeof(int)))
{
printf("Could not hipMalloc clusters\n");
hipFree(d_data);
hipFree(d_centers);
success[0] = -1;
return;
}
if(hipSuccess != hipMalloc( (void**)&dtw, numTS * (ts_length+1) * sizeof(float)))
{
printf("Could not hipMalloc DTW\n");
hipFree(d_data);
hipFree(d_centers);
hipFree(d_clusters);
success[0] = -1;
return;
}
hipMemcpy(d_data, data, numTS*ts_length*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_centers, centers, k*ts_length*sizeof(float), hipMemcpyHostToDevice);
if(checkCUDAError() == -1)
{
return;
}
printf("Computing k-means...\n");
int numIters = 10;
int blocksize = 512;
int gridsizeDistances = numTS / blocksize;
int gridsizeCenters = ts_length / blocksize;
for(int i = 0; i < numIters; i++)
{
printf("Begin iteration %d\n", i);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cudaKMeansDistances), dim3(gridsizeDistances + 1), dim3(blocksize), 0, 0, d_data, d_centers, dtw, numTS, ts_length, k, d_clusters);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cudaKMeansCenters), dim3(gridsizeCenters + 1), dim3(blocksize), k * sizeof(float), 0, d_data, d_centers, numTS, ts_length, k, d_clusters);
}
hipDeviceSynchronize();
hipMemcpy(clusters, d_clusters, numTS*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(centers, d_centers, k * ts_length * sizeof(float), hipMemcpyDeviceToHost);
//Compute withinss
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + cluster*ts_length;
float distanceToCenter = DTW(ts, center, ts_length);
withinss[cluster] = withinss[cluster] + (distanceToCenter * distanceToCenter);
}
for(int i = 0; i < numTS; i++)
{
printf("%d ", clusters[i]);
}
printf("\n");
hipFree(d_data);
hipFree(d_centers);
hipFree(d_clusters);
hipFree(dtw);
success[0] = 1;
if(checkCUDAError() == -1)
{
return;
}
hipDeviceReset();
}
/////////////////
//R KMEANS WITH C
/////////////////
//Entry point for R to run host KMeans
extern "C" void RKMeans(float* data, float* centers, int* numTSR, int* ts_lengthR, int* kR, int* clusters, float* withinss, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
int k = kR[0];
int* ktable = (int*)malloc(sizeof(int) * k);
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
int numIters = 10;
for(int rep = 0; rep < numIters; rep++)
{
printf("Begin iteration %d\n", rep);
//Compute cluster assignments and cluster size
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
float smallestDistance = DTW(ts, centers, ts_length);
if(smallestDistance == -1) {success[0] = -1; return;}
int smallestDistanceIndex = 0;
//printf("DTW for TS %d with center 0: %f\n", i, smallestDistance);
for(int j = 1; j < k; j++)
{
float* center = centers + ts_length*j;
float dtw = DTW(ts, center, ts_length);
if(dtw == -1) {success[0] = -1; return;}
//printf("DTW for TS %d with center %d: %f\n", i, j, dtw);
if(dtw < smallestDistance)
{
smallestDistance = dtw;
smallestDistanceIndex = j;
}
}
//printf("Assinging TS %d to cluster %d\n", i, smallestDistanceIndex);
clusters[i] = smallestDistanceIndex;
ktable[smallestDistanceIndex] = ktable[smallestDistanceIndex] + 1;
}
//Reset centers
for(int i = 0; i < k*ts_length; i++)
{
centers[i] = 0;
}
//Set centers to be center of data
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + ts_length*cluster;
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] + ts[j];
}
}
for(int i = 0; i < k; i++)
{
float* center = centers + ts_length*i;
int clusterSize = ktable[i];
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] / clusterSize;
}
}
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
}
//Final Steps:
//Compute withinss
//Reindex cluster assignments to start with 1
//Clean up memory
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + cluster*ts_length;
float distanceToCenter = DTW(ts, center, ts_length);
withinss[cluster] = withinss[cluster] + (distanceToCenter*distanceToCenter);
}
free(ktable);
success[0] = 1;
}
//////////////////
//R TS DIST WITH C
//////////////////
//Entry point for R to run host TS distance matrix
extern "C" void RTSDist(float* data, int* numTSR, int* ts_lengthR, float* distances)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
for(int i = 0; i < numTS; i++)
{
float* ts1 = data + i*ts_length;
for(int j = i+1; j < numTS; j++)
{
float* ts2 = data + j*ts_length;
float distance = DTW(ts1, ts2, ts_length);
if(i == 0)
distances[j-1] = distance;
else
distances[numTS*i - ((i+1)*(i+2)/2) + j] = distance;
}
}
}
//////////////////////
//R TS DIST WITH CUDA
//////////////////////
__global__ void cudaTSDistKer(float* d_data, float* DTW, int numTS, int ts_length, int totalComparisons, float* d_distances)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
if(z >= totalComparisons) return;
int i, j;
float a = -1;
float b = -1 + 2*numTS;
float c = -2*z;
float i1 = (-b + sqrt(b*b - 4*a*c)) / (2*a);
i = (int)i1;
float j1 = z - i*numTS + ((i+1)*(i+2))/2; // use the truncated integer row index so the column is recovered exactly
j = (int)j1;
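// Index bookkeeping (a sketch of the intent): the condensed distance vector stores the pair (i,j), i < j,
// at z = numTS*i - (i+1)*(i+2)/2 + j. At the start of row i (j = i+1) this reduces to
// z = i*(2*numTS - 1 - i)/2, so solving that quadratic for i and truncating recovers the row, and the
// forward formula then gives the column j.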
float* ts1 = d_data + i * ts_length;
float* ts2 = d_data + j * ts_length;
float dtw = cudaDTW(ts1, ts2, ts_length, DTW, z);
d_distances[z] = dtw;
}
//Entry point for R to run GPU TS distance matrix
extern "C" void cudaRTSDist(float* data, int* numTSR, int* ts_lengthR, float* distances, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
float* d_data;
float* d_distances;
float* dtw;
int totalComparisons = ((float)numTS/2) * ((float)numTS-1);
printf("%d ", totalComparisons);
if(hipSuccess != hipMalloc( (void**)&d_data, numTS * ts_length * sizeof(float)))
{
printf("Could not hipMalloc data\n");
return;
}
if(hipSuccess != hipMalloc( (void**)&d_distances, totalComparisons * sizeof(float)))
{
printf("Could not hipMalloc distances\n");
hipFree(d_data);
return;
}
if(hipSuccess != hipMalloc( (void**)&dtw, totalComparisons * (ts_length + 1) * sizeof(float)))
{
printf("Could not hipMalloc dtw\n");
hipFree(d_data);
hipFree(d_distances);
return;
}
hipMemcpy(d_data, data, numTS * ts_length * sizeof(float), hipMemcpyHostToDevice);
int blocksize = totalComparisons;
if(blocksize > 512)
blocksize = 512;
int gridsize = totalComparisons / blocksize;
printf("Calculating distances...\n");
hipLaunchKernelGGL(( cudaTSDistKer), dim3(gridsize+1), dim3(blocksize), 0, 0, d_data, dtw, numTS, ts_length, totalComparisons, d_distances);
hipDeviceSynchronize();
hipMemcpy(distances, d_distances, totalComparisons * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_distances);
hipFree(dtw);
hipDeviceReset();
success[0] = 1;
}
///////////////////////////////////
//KMEANS ON THE HOST FOR REFERENCE
///////////////////////////////////
int* cpuKMeans(float* data, float* centers, int numTS, int ts_length, int k)
{
int* clusters = (int*)malloc(sizeof(int) * numTS);
int* ktable = (int*)malloc(sizeof(int) * k);
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
int numIters = 10;
for(int rep = 0; rep < numIters; rep++)
{
printf("Begin iteration %d\n", rep);
//Compute cluster assignments and cluster size
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
float smallestDistance = DTW(ts, centers, ts_length);
int smallestDistanceIndex = 0;
//printf("DTW for TS %d with center 0: %f\n", i, smallestDistance);
for(int j = 1; j < k; j++)
{
float* center = centers + ts_length*j;
float dtw = DTW(ts, center, ts_length);
//printf("DTW for TS %d with center %d: %f\n", i, j, dtw);
if(dtw < smallestDistance)
{
smallestDistance = dtw;
smallestDistanceIndex = j;
}
}
//printf("Assinging TS %d to cluster %d\n", i, smallestDistanceIndex);
clusters[i] = smallestDistanceIndex;
ktable[smallestDistanceIndex] = ktable[smallestDistanceIndex] + 1;
}
//Reset centers
for(int i = 0; i < k*ts_length; i++)
{
centers[i] = 0;
}
//Set centers to be center of data
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + ts_length*cluster;
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] + ts[j];
}
}
for(int i = 0; i < k; i++)
{
float* center = centers + ts_length*i;
int clusterSize = ktable[i];
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] / clusterSize;
}
}
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
}
//Final Steps:
//Clean up memory
free(ktable);
return clusters;
}
int main(int argc, char** argv)
{
if(argc != 5)
{
printf("Usage: ./kmeans <-cpu or -gpu> <numTS> <ts_length> <k>\n");
return -1;
}
srand(100);
int numTS = atoi(argv[2]);
int ts_length = atoi(argv[3]);
int k = atoi(argv[4]);
float* data = (float*)malloc(sizeof(float) * numTS * ts_length);
for(int i = 0; i < numTS*ts_length; i++)
{
if(i < numTS*ts_length / 2)
data[i] = rand() % 300;
else data[i] = (rand() % 10) * (rand() % 10);
}
float* centers = (float*)malloc(sizeof(float) * k * ts_length);
for(int i = 0; i < k * ts_length; i++)
{
if(i < k*ts_length / 2)
centers[i] = rand() % 150;
else centers[i] = (rand() % 10) * (rand() % 10);
}
if(strcmp(argv[1], "-cpu") == 0)
{
int* clusters = cpuKMeans(data, centers, numTS, ts_length, k);
for(int i = 0; i < numTS; i++)
{
printf("%d ", clusters[i]);
}
printf("\n");
free(data);
free(centers);
free(clusters);
return 0;
}
else if(strcmp(argv[1], "-gpu") == 0)
{
int* clusters = (int*)malloc(numTS * sizeof(int));
float* withinss = (float*)calloc(k, sizeof(float)); // zero-initialised; cudaRKMeans accumulates into it
int* success = (int*)malloc( sizeof(int) );
cudaRKMeans(data, centers, &numTS, &ts_length, &k, clusters, withinss, success);
free(data);
free(centers);
free(clusters);
free(withinss);
free(success);
return 0;
}
}
|
7c219ce882314636396c994be6c3a737c7d78f89.cu
|
//CUDA implementation of k-means using DTW
//Author: Jeffrey Wong
//Last Modified: 27 May 2011
//Status: Working
//To Do: Code decomposition. Want to write DTW, kmeans, and tsdist as separate
//files. Problem, NVCC does not allow a kernel to use a device function from
//another file. How can we have the kmeans kernel refer to a DTW file?
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#include<math.h>
#include<cuda.h>
#include"cuPrintf.cu"
//////
//MISC
//////
int checkCUDAError()
{
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("Error during initialization: %s\n", cudaGetErrorString(error));
return -1;
}
return 1;
}
/////////////
//DTW HELPERS
/////////////
__host__ __device__ float min(float x, float y, float z)
{
float smallest = x;
if(smallest > y)
smallest = y;
if(smallest > z)
smallest = z;
return smallest;
}
//local distance for dtw
__host__ __device__ float distance(float x, float y)
{
if(x > y)
return (x-y);
else return (y-x);
}
//////////
//CUDA DTW
//////////
__device__ float cudaDTW(float* x, float* y, int length, float* DTW, int index)
{
int n = length+1;
float* dtw = DTW + index*n;
for(int i = 0; i < n; i++)
{
dtw[i] = 0;
}
float cost;
float next;
for(int i=0; i < n; i++)
{
dtw[i] = 9999;
}
float prev = 9999;
dtw[0] = 0;
for(int i = 0; i < length; i++)
{
for(int j = 0; j < length; j++)
{
cost = distance(x[i], y[j]);
next = cost + min(dtw[j+1], prev, dtw[j]);
if(i == length - 1 && j == length-1)
{
return next;
}
dtw[j+1] = next;
dtw[j] = prev;
prev = next;
}
}
return next;
}
float DTW(float* x, float* y, int length)
{
int n = (length+1);
float* DTW = (float*) malloc(n * sizeof(float));
if(DTW == NULL) { return -1; }
for(int i = 0; i < n; i++)
{
DTW[i] = 0;
}
float cost;
float next;
for(int i=0; i < n; i++)
{
DTW[i] = 9999;
}
float prev = 9999;
DTW[0] = 0;
for(int i = 0; i < length; i++)
{
for(int j = 0; j < length; j++)
{
cost = distance(x[i], y[j]);
next = cost + min(DTW[j+1], prev, DTW[j]);
DTW[j+1] = next;
DTW[j] = prev;
prev = next;
}
}
free(DTW);
return next;
}
////////////////
//KMEANS KERNELS
////////////////
__global__ void cudaKMeansDistances(float* d_data, float* d_centers, float* DTW, int numTS, int ts_length, int k, int* d_clusters)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= numTS) return;
float* ts = d_data + i*ts_length;
float shortestDistance = cudaDTW(ts, d_centers, ts_length, DTW, i);
int shortestDistanceIndex = 0;
for(int j = 1; j < k; j++)
{
float* center = d_centers + j*ts_length;
float distance = cudaDTW(ts, center, ts_length, DTW, i);
if(distance < shortestDistance)
{
shortestDistance = distance;
shortestDistanceIndex = j;
}
}
d_clusters[i] = shortestDistanceIndex;
}
__global__ void cudaKMeansCenters(float* d_data, float* d_centers, int numTS, int ts_length, int k, int* d_clusters)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= ts_length) return;
extern __shared__ int clusterSize[];
if(threadIdx.x == 0)
{
for(int j = 0; j < k; j++)
{
clusterSize[j] = 0;
}
for(int j = 0; j < numTS; j++)
{
int cluster = d_clusters[j];
clusterSize[cluster] = clusterSize[cluster] + 1;
}
}
__syncthreads();
for(int j = 0; j < k; j++)
{
float* center = d_centers + j*ts_length;
center[i] = 0;
}
for(int j = 0; j < numTS; j++)
{
float* ts = d_data + j*ts_length;
int cluster = d_clusters[j];
float* center = d_centers + cluster*ts_length;
center[i] = center[i] + ts[i];
}
for(int j = 0; j < k; j++)
{
float* center = d_centers + j*ts_length;
center[i] = center[i] / clusterSize[j];
}
}
////////////////////
//R KMEANS WITH CUDA
////////////////////
//Entry point for R to run cudaKMeans
extern "C" void cudaRKMeans(float* data, float* centers, int* numTSR, int* ts_lengthR, int* kR, int* clusters, float* withinss, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
int k = kR[0];
float* d_data;
float* d_centers;
int* d_clusters;
float* dtw;
if(cudaSuccess != cudaMalloc( (void**)&d_data, numTS * ts_length * sizeof(float)))
{
printf("Could not cudaMalloc data\n");
success[0] = -1;
return;
}
if(cudaSuccess != cudaMalloc( (void**)&d_centers, k * ts_length * sizeof(float)))
{
printf("Could not cudaMalloc centers\n");
cudaFree(d_data);
success[0] = -1;
return;
}
if(cudaSuccess != cudaMalloc( (void**)&d_clusters, numTS * sizeof(int)))
{
printf("Could not cudaMalloc clusters\n");
cudaFree(d_data);
cudaFree(d_centers);
success[0] = -1;
return;
}
if(cudaSuccess != cudaMalloc( (void**)&dtw, numTS * (ts_length+1) * sizeof(float)))
{
printf("Could not cudaMalloc DTW\n");
cudaFree(d_data);
cudaFree(d_centers);
cudaFree(d_clusters);
success[0] = -1;
return;
}
cudaMemcpy(d_data, data, numTS*ts_length*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_centers, centers, k*ts_length*sizeof(float), cudaMemcpyHostToDevice);
if(checkCUDAError() == -1)
{
return;
}
printf("Computing k-means...\n");
int numIters = 10;
int blocksize = 512;
int gridsizeDistances = numTS / blocksize;
int gridsizeCenters = ts_length / blocksize;
for(int i = 0; i < numIters; i++)
{
printf("Begin iteration %d\n", i);
cudaThreadSynchronize();
cudaKMeansDistances<<<gridsizeDistances + 1, blocksize>>>(d_data, d_centers, dtw, numTS, ts_length, k, d_clusters);
cudaThreadSynchronize();
cudaKMeansCenters<<<gridsizeCenters + 1, blocksize, k * sizeof(float)>>>(d_data, d_centers, numTS, ts_length, k, d_clusters);
}
cudaThreadSynchronize();
cudaMemcpy(clusters, d_clusters, numTS*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(centers, d_centers, k * ts_length * sizeof(float), cudaMemcpyDeviceToHost);
//Compute withinss
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + cluster*ts_length;
float distanceToCenter = DTW(ts, center, ts_length);
withinss[cluster] = withinss[cluster] + (distanceToCenter * distanceToCenter);
}
for(int i = 0; i < numTS; i++)
{
printf("%d ", clusters[i]);
}
printf("\n");
cudaFree(d_data);
cudaFree(d_centers);
cudaFree(d_clusters);
cudaFree(dtw);
success[0] = 1;
if(checkCUDAError() == -1)
{
return;
}
cudaThreadExit();
}
/////////////////
//R KMEANS WITH C
/////////////////
//Entry point for R to run host KMeans
extern "C" void RKMeans(float* data, float* centers, int* numTSR, int* ts_lengthR, int* kR, int* clusters, float* withinss, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
int k = kR[0];
int* ktable = (int*)malloc(sizeof(int) * k);
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
int numIters = 10;
for(int rep = 0; rep < numIters; rep++)
{
printf("Begin iteration %d\n", rep);
//Compute cluster assignments and cluster size
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
float smallestDistance = DTW(ts, centers, ts_length);
if(smallestDistance == -1) {success[0] = -1; return;}
int smallestDistanceIndex = 0;
//printf("DTW for TS %d with center 0: %f\n", i, smallestDistance);
for(int j = 1; j < k; j++)
{
float* center = centers + ts_length*j;
float dtw = DTW(ts, center, ts_length);
if(dtw == -1) {success[0] = -1; return;}
//printf("DTW for TS %d with center %d: %f\n", i, j, dtw);
if(dtw < smallestDistance)
{
smallestDistance = dtw;
smallestDistanceIndex = j;
}
}
//printf("Assinging TS %d to cluster %d\n", i, smallestDistanceIndex);
clusters[i] = smallestDistanceIndex;
ktable[smallestDistanceIndex] = ktable[smallestDistanceIndex] + 1;
}
//Reset centers
for(int i = 0; i < k*ts_length; i++)
{
centers[i] = 0;
}
//Set centers to be center of data
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + ts_length*cluster;
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] + ts[j];
}
}
for(int i = 0; i < k; i++)
{
float* center = centers + ts_length*i;
int clusterSize = ktable[i];
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] / clusterSize;
}
}
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
}
//Final Steps:
//Compute withinss
//Reindex cluster assignments to start with 1
//Clean up memory
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + cluster*ts_length;
float distanceToCenter = DTW(ts, center, ts_length);
withinss[cluster] = withinss[cluster] + (distanceToCenter*distanceToCenter);
}
free(ktable);
success[0] = 1;
}
//////////////////
//R TS DIST WITH C
//////////////////
//Entry point for R to run host TS distance matrix
extern "C" void RTSDist(float* data, int* numTSR, int* ts_lengthR, float* distances)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
for(int i = 0; i < numTS; i++)
{
float* ts1 = data + i*ts_length;
for(int j = i+1; j < numTS; j++)
{
float* ts2 = data + j*ts_length;
float distance = DTW(ts1, ts2, ts_length);
if(i == 0)
distances[j-1] = distance;
else
distances[numTS*i - ((i+1)*(i+2)/2) + j] = distance;
}
}
}
//////////////////////
//R TS DIST WITH CUDA
//////////////////////
__global__ void cudaTSDistKer(float* d_data, float* DTW, int numTS, int ts_length, int totalComparisons, float* d_distances)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
if(z >= totalComparisons) return;
int i, j;
float a = -1;
float b = -1 + 2*numTS;
float c = -2*z;
float i1 = (-b + sqrt(b*b - 4*a*c)) / (2*a);
i = (int)i1;
float j1 = z - i*numTS + ((i+1)*(i+2))/2; // use the truncated integer row index so the column is recovered exactly
j = (int)j1;
float* ts1 = d_data + i * ts_length;
float* ts2 = d_data + j * ts_length;
float dtw = cudaDTW(ts1, ts2, ts_length, DTW, z);
d_distances[z] = dtw;
}
//Entry point for R to run GPU TS distance matrix
extern "C" void cudaRTSDist(float* data, int* numTSR, int* ts_lengthR, float* distances, int* success)
{
int numTS = numTSR[0];
int ts_length = ts_lengthR[0];
float* d_data;
float* d_distances;
float* dtw;
int totalComparisons = ((float)numTS/2) * ((float)numTS-1);
printf("%d ", totalComparisons);
if(cudaSuccess != cudaMalloc( (void**)&d_data, numTS * ts_length * sizeof(float)))
{
printf("Could not cudaMalloc data\n");
return;
}
if(cudaSuccess != cudaMalloc( (void**)&d_distances, totalComparisons * sizeof(float)))
{
printf("Could not cudaMalloc distances\n");
cudaFree(d_data);
return;
}
if(cudaSuccess != cudaMalloc( (void**)&dtw, totalComparisons * (ts_length + 1) * sizeof(float)))
{
printf("Could not cudaMalloc dtw\n");
cudaFree(d_data);
cudaFree(d_distances);
return;
}
cudaMemcpy(d_data, data, numTS * ts_length * sizeof(float), cudaMemcpyHostToDevice);
int blocksize = totalComparisons;
if(blocksize > 512)
blocksize = 512;
int gridsize = totalComparisons / blocksize;
printf("Calculating distances...\n");
cudaTSDistKer<<<gridsize+1, blocksize>>>(d_data, dtw, numTS, ts_length, totalComparisons, d_distances);
cudaThreadSynchronize();
cudaMemcpy(distances, d_distances, totalComparisons * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_data);
cudaFree(d_distances);
cudaFree(dtw);
cudaThreadExit();
success[0] = 1;
}
///////////////////////////////////
//KMEANS ON THE HOST FOR REFERENCE
///////////////////////////////////
int* cpuKMeans(float* data, float* centers, int numTS, int ts_length, int k)
{
int* clusters = (int*)malloc(sizeof(int) * numTS);
int* ktable = (int*)malloc(sizeof(int) * k);
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
int numIters = 10;
for(int rep = 0; rep < numIters; rep++)
{
printf("Begin iteration %d\n", rep);
//Compute cluster assignments and cluster size
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
float smallestDistance = DTW(ts, centers, ts_length);
int smallestDistanceIndex = 0;
//printf("DTW for TS %d with center 0: %f\n", i, smallestDistance);
for(int j = 1; j < k; j++)
{
float* center = centers + ts_length*j;
float dtw = DTW(ts, center, ts_length);
//printf("DTW for TS %d with center %d: %f\n", i, j, dtw);
if(dtw < smallestDistance)
{
smallestDistance = dtw;
smallestDistanceIndex = j;
}
}
//printf("Assinging TS %d to cluster %d\n", i, smallestDistanceIndex);
clusters[i] = smallestDistanceIndex;
ktable[smallestDistanceIndex] = ktable[smallestDistanceIndex] + 1;
}
//Reset centers
for(int i = 0; i < k*ts_length; i++)
{
centers[i] = 0;
}
//Set centers to be center of data
for(int i = 0; i < numTS; i++)
{
float* ts = data + ts_length*i;
int cluster = clusters[i];
float* center = centers + ts_length*cluster;
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] + ts[j];
}
}
for(int i = 0; i < k; i++)
{
float* center = centers + ts_length*i;
int clusterSize = ktable[i];
for(int j = 0; j < ts_length; j++)
{
center[j] = center[j] / clusterSize;
}
}
for(int i = 0; i < k; i++)
{
ktable[i] = 0;
}
}
//Final Steps:
//Clean up memory
free(ktable);
return clusters;
}
int main(int argc, char** argv)
{
if(argc != 5)
{
printf("Usage: ./kmeans <-cpu or -gpu> <numTS> <ts_length> <k>\n");
return -1;
}
srand(100);
int numTS = atoi(argv[2]);
int ts_length = atoi(argv[3]);
int k = atoi(argv[4]);
float* data = (float*)malloc(sizeof(float) * numTS * ts_length);
for(int i = 0; i < numTS*ts_length; i++)
{
if(i < numTS*ts_length / 2)
data[i] = rand() % 300;
else data[i] = (rand() % 10) * (rand() % 10);
}
float* centers = (float*)malloc(sizeof(float) * k * ts_length);
for(int i = 0; i < k * ts_length; i++)
{
if(i < k*ts_length / 2)
centers[i] = rand() % 150;
else centers[i] = (rand() % 10) * (rand() % 10);
}
if(strcmp(argv[1], "-cpu") == 0)
{
int* clusters = cpuKMeans(data, centers, numTS, ts_length, k);
for(int i = 0; i < numTS; i++)
{
printf("%d ", clusters[i]);
}
printf("\n");
free(data);
free(centers);
free(clusters);
return 0;
}
else if(strcmp(argv[1], "-gpu") == 0)
{
int* clusters = (int*)malloc(numTS * sizeof(int));
float* withinss = (float*)calloc(k, sizeof(float)); // zero-initialised; cudaRKMeans accumulates into it
int* success = (int*)malloc( sizeof(int) );
cudaRKMeans(data, centers, &numTS, &ts_length, &k, clusters, withinss, success);
free(data);
free(centers);
free(clusters);
free(withinss);
free(success);
return 0;
}
}
|
e9fcb8e73520c11ee13393a352e2c33233aed1f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "readGlobalMemoryUnit.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE*sizeof(float)); // byte count for XSIZE*YSIZE floats
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE*sizeof(float)); // byte count for XSIZE*YSIZE floats
int size = XSIZE*YSIZE;
int repeat = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(readGlobalMemoryUnit, dim3(gridBlock), dim3(threadBlock), 0, 0, data, output, size, repeat);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(readGlobalMemoryUnit, dim3(gridBlock), dim3(threadBlock), 0, 0, data, output, size, repeat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(readGlobalMemoryUnit, dim3(gridBlock), dim3(threadBlock), 0, 0, data, output, size, repeat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e9fcb8e73520c11ee13393a352e2c33233aed1f1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "readGlobalMemoryUnit.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE*sizeof(float)); // byte count for XSIZE*YSIZE floats
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE*sizeof(float)); // byte count for XSIZE*YSIZE floats
int size = XSIZE*YSIZE;
int repeat = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
readGlobalMemoryUnit<<<gridBlock,threadBlock>>>(data,output,size,repeat);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
readGlobalMemoryUnit<<<gridBlock,threadBlock>>>(data,output,size,repeat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
readGlobalMemoryUnit<<<gridBlock,threadBlock>>>(data,output,size,repeat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
089e4859fa7ba6ab6d61736b7423048bf0c78445.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "parameter.h"
#include "array_definition.h"
//#include "cuda_funclist.h"
//#include "cuda_function.h"
//#include "cuda_subroutine.h"
#include "hip_funclist.h"
#include "hip_function.h"
#include "hip_subroutine.h"
extern "C" void cuda_main(float *h_u, float *h_b, int *h_nx, int *h_ny, int *h_nz)
{
// general info initialization
int Totalthreads = (*h_nx)*(*h_ny)*(*h_nz);
int numThreadsPerBlock = *h_nx;
int numBlocks = Totalthreads/numThreadsPerBlock;
int NumOfU = 5;
int NumOfB = 3;
// memory size initialization
size_t u_memSize = NumOfU * numBlocks * numThreadsPerBlock * sizeof(float);
size_t b_memSize = NumOfB * numBlocks * numThreadsPerBlock * sizeof(float);
size_t c_memSize = numBlocks * numThreadsPerBlock * sizeof(float);
size_t int_memSize = sizeof(int);
size_t float_memSize = sizeof(float);
// data on the host
float h_dt_value = 0.0f;
float *h_dt = &h_dt_value; // point at valid host storage; the timestep is read back through h_dt after cuda_cfl
// data on the device
// hipMalloc
// for general purpose
float *d_u, *d_b;
hipMalloc( (void **) &d_u, u_memSize );
hipMalloc( (void **) &d_b, b_memSize );
int *d_nx,*d_ny,*d_nz;
hipMalloc( (void **) &d_nx, int_memSize );
hipMalloc( (void **) &d_ny, int_memSize );
hipMalloc( (void **) &d_nz, int_memSize );
float *d_dt;
hipMalloc( (void **) &d_dt, float_memSize );
// for cuda_cfl
float *d_c;
hipMalloc( (void **) &d_c, c_memSize );
// for cuda_advectbyzx
float *d_temp;
hipMalloc( (void **) &d_temp, c_memSize );
// for cuda_transpose
float *d_ut, *d_bt;
hipMalloc( (void **) &d_ut, u_memSize );
hipMalloc( (void **) &d_bt, b_memSize );
// hipMemcpy
// copy data from host to device
hipMemcpy( d_u, h_u, u_memSize, hipMemcpyHostToDevice );
hipMemcpy( d_b, h_b, b_memSize, hipMemcpyHostToDevice );
hipMemcpy( d_nx, h_nx, int_memSize, hipMemcpyHostToDevice );
hipMemcpy( d_ny, h_ny, int_memSize, hipMemcpyHostToDevice );
hipMemcpy( d_nz, h_nz, int_memSize, hipMemcpyHostToDevice );
//
checkCUDAError("memcpy: from host to device, in cuda_main");
// initialize data for loop
float t,dt,tf;
int iter;
float ct;
printf("in the cuda_main\n");
t=0;
iter=0;
ct=100.;
tf=ct*10;
// initialization for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// in milliseconds with a resolution of around 0.5 microseconds
float elapsedTime;
printf("move into to do loop\n");
do {
// start the timer
hipEventRecord(start,0);
// output
// if you want to output, you have to use hipMemcpy
// copy the data from device to host to output
hipMemcpy( h_u, d_u, u_memSize, hipMemcpyDeviceToHost );
hipMemcpy( h_b, d_b, b_memSize, hipMemcpyDeviceToHost );
printf("output\n");
printf("t= %f, %i, %f\n",t,iter,h_u[a4D_FinC(5,(*h_nx),(*h_ny),(*h_nz),(5-1),(*h_nx)/4,1,1)]);
// done output
printf("done output\n");
iter=iter+1;
printf("into the cuda_cfl\n");
cuda_cfl(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_c,h_nx,h_ny,h_nz,h_dt);
printf("cuda_cfl done\n");
dt=0.9*(*h_dt);
//dt=0.5;
if (dt>(tf-t)/2.0) dt=(tf-t)/2.0;
t=t+2.0*dt;
// start sweep
printf("cuda_fluidx\n");
cuda_fluidx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,h_nx,h_ny,h_nz);
printf("cuda_advectbyzx\n");
cuda_advectbyzx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_temp,h_nx,h_ny,h_nz);
// the y sweep
printf("cuda_transpose12\n");
cuda_transpose12(d_ut,d_bt,d_u,d_b,d_nx,d_ny,d_nz,h_nx,h_ny,h_nz);
cuda_fluidx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,h_ny,h_nx,h_nz);
cuda_advectbyzx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,d_temp,h_ny,h_nx,h_nz);
// z sweep
cuda_transpose13(d_ut,d_bt,d_u,d_b,d_ny,d_nx,d_nz,h_ny,h_nx,h_nz);
cuda_fluidx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,d_temp,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,d_temp,h_nz,h_nx,h_ny);
cuda_fluidx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,h_nz,h_nx,h_ny);
// back
cuda_transpose13(d_ut,d_bt,d_u,d_b,d_nz,d_nx,d_ny,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,d_temp,h_ny,h_nx,h_nz);
cuda_fluidx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,h_ny,h_nx,h_nz);
// x again
cuda_transpose12(d_ut,d_bt,d_u,d_b,d_ny,d_nx,d_nz,h_ny,h_nx,h_nz);
cuda_advectbyzx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_temp,h_nx,h_ny,h_nz);
cuda_fluidx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,h_nx,h_ny,h_nz);
// finish sweep
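// note: the sweeps run x, y, z and then unwind z, y, x around the double step t += 2*dt; this
// forward-then-reversed ordering looks like the usual symmetric (Strang-style) dimensional splitting,
// which keeps the split update second-order accurate in time.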
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("time per loop(in milliseconds): %f\n",elapsedTime);
} while (t<tf);
//
// hipMemcpy
// copy data from device to host
hipMemcpy( h_u, d_u, u_memSize, hipMemcpyDeviceToHost );
hipMemcpy( h_b, d_b, b_memSize, hipMemcpyDeviceToHost );
//
checkCUDAError("memcpy: from device to host, in cuda_main");
//
hipFree(d_u);
hipFree(d_b);
hipFree(d_nx);
hipFree(d_ny);
hipFree(d_nz);
hipFree(d_dt);
//
hipEventDestroy(start);
hipEventDestroy(stop);
//
return;
}
|
089e4859fa7ba6ab6d61736b7423048bf0c78445.cu
|
#include <stdio.h>
#include <math.h>
#include "cuda.h"
#include "parameter.h"
#include "array_definition.h"
//#include "cuda_funclist.h"
//#include "cuda_function.h"
//#include "cuda_subroutine.h"
#include "hip_funclist.h"
#include "hip_function.h"
#include "hip_subroutine.h"
extern "C" void cuda_main(float *h_u, float *h_b, int *h_nx, int *h_ny, int *h_nz)
{
// general info initialization
int Totalthreads = (*h_nx)*(*h_ny)*(*h_nz);
int numThreadsPerBlock = *h_nx;
int numBlocks = Totalthreads/numThreadsPerBlock;
int NumOfU = 5;
int NumOfB = 3;
// memory size initialization
size_t u_memSize = NumOfU * numBlocks * numThreadsPerBlock * sizeof(float);
size_t b_memSize = NumOfB * numBlocks * numThreadsPerBlock * sizeof(float);
size_t c_memSize = numBlocks * numThreadsPerBlock * sizeof(float);
size_t int_memSize = sizeof(int);
size_t float_memSize = sizeof(float);
// data on the host
float h_dt_value = 0.0f;
float *h_dt = &h_dt_value; // point at valid host storage; the timestep is read back through h_dt after cuda_cfl
// data on the device
// cudaMalloc
// for general purpose
float *d_u, *d_b;
cudaMalloc( (void **) &d_u, u_memSize );
cudaMalloc( (void **) &d_b, b_memSize );
int *d_nx,*d_ny,*d_nz;
cudaMalloc( (void **) &d_nx, int_memSize );
cudaMalloc( (void **) &d_ny, int_memSize );
cudaMalloc( (void **) &d_nz, int_memSize );
float *d_dt;
cudaMalloc( (void **) &d_dt, float_memSize );
// for cuda_cfl
float *d_c;
cudaMalloc( (void **) &d_c, c_memSize );
// for cuda_advectbyzx
float *d_temp;
cudaMalloc( (void **) &d_temp, c_memSize );
// for cuda_transpose
float *d_ut, *d_bt;
cudaMalloc( (void **) &d_ut, u_memSize );
cudaMalloc( (void **) &d_bt, b_memSize );
// cudaMemcpy
// copy data from host to device
cudaMemcpy( d_u, h_u, u_memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, h_b, b_memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_nx, h_nx, int_memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_ny, h_ny, int_memSize, cudaMemcpyHostToDevice );
cudaMemcpy( d_nz, h_nz, int_memSize, cudaMemcpyHostToDevice );
//
checkCUDAError("memcpy: from host to device, in cuda_main");
// initialize data for loop
float t,dt,tf;
int iter;
float ct;
printf("in the cuda_main\n");
t=0;
iter=0;
ct=100.;
tf=ct*10;
// initialization for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// in milliseconds with a resolution of around 0.5 microseconds
float elapsedTime;
printf("move into to do loop\n");
do {
// start the timer
cudaEventRecord(start,0);
// output
// if you want to output, you have to use cudaMemcpy
// copy the data from device to host to output
cudaMemcpy( h_u, d_u, u_memSize, cudaMemcpyDeviceToHost );
cudaMemcpy( h_b, d_b, b_memSize, cudaMemcpyDeviceToHost );
printf("output\n");
printf("t= %f, %i, %f\n",t,iter,h_u[a4D_FinC(5,(*h_nx),(*h_ny),(*h_nz),(5-1),(*h_nx)/4,1,1)]);
// done output
printf("done output\n");
iter=iter+1;
printf("into the cuda_cfl\n");
cuda_cfl(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_c,h_nx,h_ny,h_nz,h_dt);
printf("cuda_cfl done\n");
dt=0.9*(*h_dt);
//dt=0.5;
if (dt>(tf-t)/2.0) dt=(tf-t)/2.0;
t=t+2.0*dt;
// start sweep
printf("cuda_fluidx\n");
cuda_fluidx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,h_nx,h_ny,h_nz);
printf("cuda_advectbyzx\n");
cuda_advectbyzx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_temp,h_nx,h_ny,h_nz);
// the y sweep
printf("cuda_transpose12\n");
cuda_transpose12(d_ut,d_bt,d_u,d_b,d_nx,d_ny,d_nz,h_nx,h_ny,h_nz);
cuda_fluidx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,h_ny,h_nx,h_nz);
cuda_advectbyzx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,d_temp,h_ny,h_nx,h_nz);
// z sweep
cuda_transpose13(d_ut,d_bt,d_u,d_b,d_ny,d_nx,d_nz,h_ny,h_nx,h_nz);
cuda_fluidx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,d_temp,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,d_temp,h_nz,h_nx,h_ny);
cuda_fluidx(d_u,d_b,d_nz,d_nx,d_ny,d_dt,h_nz,h_nx,h_ny);
// back
cuda_transpose13(d_ut,d_bt,d_u,d_b,d_nz,d_nx,d_ny,h_nz,h_nx,h_ny);
cuda_advectbyzx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,d_temp,h_ny,h_nx,h_nz);
cuda_fluidx(d_u,d_b,d_ny,d_nx,d_nz,d_dt,h_ny,h_nx,h_nz);
// x again
cuda_transpose12(d_ut,d_bt,d_u,d_b,d_ny,d_nx,d_nz,h_ny,h_nx,h_nz);
cuda_advectbyzx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,d_temp,h_nx,h_ny,h_nz);
cuda_fluidx(d_u,d_b,d_nx,d_ny,d_nz,d_dt,h_nx,h_ny,h_nz);
// finish sweep
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("time per loop(in milliseconds): %f\n",elapsedTime);
} while (t<tf);
//
// cudaMemcpy
// copy data from device to host
cudaMemcpy( h_u, d_u, u_memSize, cudaMemcpyDeviceToHost );
cudaMemcpy( h_b, d_b, b_memSize, cudaMemcpyDeviceToHost );
//
checkCUDAError("memcpy: from device to host, in cuda_main");
//
cudaFree(d_u);
cudaFree(d_b);
cudaFree(d_nx);
cudaFree(d_ny);
cudaFree(d_nz);
cudaFree(d_dt);
//
cudaEventDestroy(start);
cudaEventDestroy(stop);
//
return;
}
|
992a86be76fa7499d0e2af705ab27312b0d3a23e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pathfinder_cuda.h"
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define NUM_ITR 4000
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
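// worked example (illustrative values only; BLOCK_SIZE and HALO come from pathfinder_cuda.h): with
// BLOCK_SIZE = 256, HALO = 1 and iteration = 10, each block owns 256 - 2*10 = 236 output columns, and
// the remaining 2*10 halo columns are recomputed redundantly by the neighbouring blocks.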
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
bool computed;
for (int k = 0; k < NUM_ITR; k++) {
for (int i=0; i<iteration; i++) {
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
}
|
992a86be76fa7499d0e2af705ab27312b0d3a23e.cu
|
#include "pathfinder_cuda.h"
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define NUM_ITR 4000
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
bool computed;
for (int k = 0; k < NUM_ITR; k++) {
for (int i=0; i<iteration; i++) {
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
}
|
1a836fac5273b9abd245aa36d909f41302f5e2c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
#include <stdio.h>
#include <hip/device_functions.h>
//#include <hip/hip_fp16.h>
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_ScaleLLRs(double* LLRs, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
const double mu = 3.0f;
LLRs[i] = LLRs[i] / mu;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_HardDecision(
double* OutputFromDecoder, int* HardDecision, int N
)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
HardDecision[i] = floorf(OutputFromDecoder[i] + 0.50f);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__shared__ int sdata[128*12]; // > 512
__global__ void reduce64(int *g_idata, unsigned int n)
{
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
// avoid bank conflict
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile int* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_idata[blockIdx.x] = sdata[0];
}
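// A minimal, hypothetical host-side driver sketch (not part of the original decoder): reduce64 leaves
// one partial sum per block in g_idata, so a full reduction relaunches on those partial sums until a
// single value remains. Assumes threads is a power of two in [64, 1024] (the warp-synchronous tail
// relies on that) and that g_idata may be overwritten in place.
static int reduceToScalar(int* d_data, unsigned int n, int threads)
{
unsigned int remaining = n;
while (remaining > 1)
{
unsigned int blocks = (remaining + threads * 2 - 1) / (threads * 2);
hipLaunchKernelGGL(reduce64, dim3(blocks), dim3(threads), 0, 0, d_data, remaining);
remaining = blocks;
}
int result = 0;
hipMemcpy(&result, d_data, sizeof(int), hipMemcpyDeviceToHost);
return result;
}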
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
1a836fac5273b9abd245aa36d909f41302f5e2c0.cu
|
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
#include <stdio.h>
#include <device_functions.h>
//#include <cuda_fp16.h>
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_ScaleLLRs(double* LLRs, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
const double mu = 3.0f;
LLRs[i] = LLRs[i] / mu;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_HardDecision(
double* OutputFromDecoder, int* HardDecision, int N
)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
HardDecision[i] = floorf(OutputFromDecoder[i] + 0.50f);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__shared__ int sdata[128*12]; // > 512
__global__ void reduce64(int *g_idata, unsigned int n)
{
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
// avoid bank conflict
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile int* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_idata[blockIdx.x] = sdata[0];
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
Lensing.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Lensing.cuh"
#include <stdlib.h>
#include <cmath>
#include <fstream>
#include <sstream>
#include <iterator>
#include <GL\glew.h>
#include <FreeImage/FreeImage.h>
namespace ginkgo
{
#define DATADEBUG 1
#define ZEROPADDING 0
std::vector<std::vector<glm::dvec4>> lensing()
{
int N =
#if ZEROPADDING
2 *
#endif
514; // ex: N = 2*32 //future: 128 X 128, N = 128 // N is the sidelength of the image
int block_size_x = 32;
int block_size_y = 32;
double *kx, *ky, *r;
kx = (double *)malloc(sizeof(double) * N);
ky = (double *)malloc(sizeof(double) * N);
r = (double *)malloc(sizeof(double) * N * N);
double *kx_d, *ky_d, *r_d;
hipfftDoubleComplex *r_complex_d;
hipMalloc((void **)&kx_d, sizeof(double) * N);
hipMalloc((void **)&ky_d, sizeof(double) * N);
hipMalloc((void **)&r_d, sizeof(double) * N * N);
hipMalloc((void **)&r_complex_d, sizeof(hipfftDoubleComplex) * N * N);
for (int y = 0; y < N; y++)
for (int x = 0; x < N; x++)
r[x + y * N] = sin(
exp(-((x - N / 2.0f) * (x - N / 2.0f) + (N / 2.0f - y) * (N / 2.0f - y)) / (50 * 50))
);
#if ZEROPADDING
for (int y = 0; y < N; y++)
for (int x = 0; x < N; x++)
if (x < N / 4.0 || x > N * 3.0 / 4.0 || y < N / 4.0 || y > N*3.0 / 4.0)
r[x + y*N] = 0;
#endif
double* r_inital = (double *)malloc(sizeof(double) * N * N);
for (int i = 0; i < N * N; i++)
r_inital[i] = r[i];
for (int i = 0; i < N; i++)
{
kx[i] = i - N / 2.0f; //centers kx values to be at center of image
ky[i] = N / 2.0f - i; //centers ky values to be at center of image
}
#if DATADEBUG
writeDoubleArrayToBitmapImage("DataDebug/SpatialDensity.bmp", N, r);
write("DataDebug/SpatialDensity.csv", r, N, N);
#endif
hipMemcpy(kx_d, kx, sizeof(double) * N, hipMemcpyHostToDevice);
hipMemcpy(ky_d, ky, sizeof(double) * N, hipMemcpyHostToDevice);
hipMemcpy(r_d, r, sizeof(double) * N * N, hipMemcpyHostToDevice);
hipfftHandle plan;
hipfftPlan2d(&plan, N, N, HIPFFT_Z2Z); // double-precision complex plan, matching hipfftExecZ2Z below
/* Compute the execution configuration
NB: block_size_x*block_size_y = number of threads */
dim3 dimBlock(block_size_x, block_size_y);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
/* Handle N not multiple of block_size_x or block_size_y */
if (N % block_size_x != 0) dimGrid.x += 1;
if (N % block_size_y != 0) dimGrid.y += 1;
real2complex<<<dimGrid, dimBlock>>>(r_complex_d, r_d, N);
hipfftExecZ2Z(plan, r_complex_d, r_complex_d, HIPFFT_FORWARD);
//solve_poisson << <dimGrid, dimBlock >> > (r_complex_d, kx_d, ky_d, N);
hipfftExecZ2Z(plan, r_complex_d, r_complex_d, HIPFFT_BACKWARD);
double scale = 1.0f / (N * N);// *2E3;
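// hipFFT/cuFFT transforms are unnormalized, so the forward + inverse round trip must be rescaled by 1/(N*N)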
complex2real_scaled<<<dimGrid, dimBlock>>>(r_d, r_complex_d, scale, N);
hipMemcpy(r, r_d, sizeof(double) * N * N, hipMemcpyDeviceToHost);
#if DATADEBUG
writeDoubleArrayToBitmapImage("DataDebug/GravitationalPotential.bmp", N, r);
write("DataDebug/GravitationalPotential.csv", r, N, N);
#endif
std::vector<std::vector<glm::dvec4>> normals = generateNormals(r, N, N);
glm::dvec4 normal = normals[N / 2][N / 2];
std::cout << "Center: " << ":\t" << normal.x << " " << normal.y << " " << normal.z << std::endl;
//normals[center] = glm::dvec3(0.0f, 0.0f, 0.0f);
#if DATADEBUG
writeNormalsToBitmapImage("DataDebug/Normals.bmp", N - 2, normals);
writeNormals("DataDebug/Normals.csv", normals);
#endif
//system("pause");
/* Destroy plan and clean up memory on device*/
free(kx);
free(ky);
free(r);
free(r_inital);
hipfftDestroy(plan);
hipFree(r_complex_d);
hipFree(kx_d);
hipFree(ky_d);
hipFree(r_d);
std::cout << "Normal Size: " << normals.size() << "\n";
return normals;
}
__global__ void real2complex(hipfftDoubleComplex *c, double *a, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
c[index].x = a[index];
c[index].y = 0.0f;
}
}
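// In Fourier space the Poisson equation laplacian(phi) = rho becomes -(kx^2 + ky^2) * phi_hat = rho_hat,
// so each mode is recovered as phi_hat = -rho_hat / (kx^2 + ky^2); the zero-frequency mode (the centred
// index N/2, N/2 here) is pinned to avoid a division by zero, which only fixes the arbitrary constant
// offset of the potential.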
__global__ void solve_poisson(hipfftDoubleComplex *c, double *kx, double *ky, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
double scale = -(kx[idx] * kx[idx] + ky[idy] * ky[idy]);
if (idx == N / 2 && idy == N / 2) scale = -1.0f;
scale = 1.0f / scale;
c[index].x *= scale;
c[index].y *= scale;
}
}
__global__ void complex2real_scaled(double *a, hipfftDoubleComplex *c, double scale, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
a[index] = scale * c[index].x;
}
}
void writeFast(const std::string& path, double* data, int NxN, int mode)
{
std::ofstream out(path.c_str(), mode);
for (int i = 0; i < NxN; i++)
out.write(reinterpret_cast<char *>(&data[i]), sizeof(double));
out.close();
}
void readFast(const std::string& path, double* data, int NxN, int mode)
{
std::ifstream in(path.c_str(), mode);
for (int i = 0; i < NxN; i++)
in.read(reinterpret_cast<char *>(&data[i]), sizeof(double));
in.close();
}
void write(const std::string& path, double* data, int height, int width, int mode)
{
std::ofstream fout(path, mode);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
fout << data[x + y * width] << ",";
fout << std::endl;
}
fout.close();
}
void write(const std::string& path, std::vector<double>& data, int height, int width, int mode)
{
std::ofstream fout(path, mode);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
fout << data[x + y * width] << ",";
fout << std::endl;
}
fout.close();
}
void writeCFast(const std::string& filename, const double* data, size_t size)
{
FILE* pFile;
pFile = fopen(filename.c_str(), "wb");
for (unsigned long long j = 0; j < 1024; ++j) {
//Some calculations to fill a[]
fwrite(data, 1, size * sizeof(double), pFile);
}
fclose(pFile);
}
double* readCFast(const std::string& filename, size_t size)
{
FILE* pFile;
long lSize;
double* data;
size_t result;
pFile = fopen(filename.c_str(), "rb");
if (pFile == NULL) { fputs("File error in readBOOMFast()", stderr); system("pause"); }
// obtain file size:
fseek(pFile, 0, SEEK_END);
lSize = ftell(pFile);
rewind(pFile);
// allocate memory to contain the whole file:
data = (double*)malloc(sizeof(char)*lSize);
if (data == NULL) { fputs("Memory error in readBOOMFast()", stderr); system("pause"); }
// copy the file into the buffer:
result = fread(data, 1, lSize, pFile);
if (result != lSize) { fputs("Reading error in readBOOMFast()", stderr); system("pause"); }
/* the whole file is now loaded in the memory buffer. */
// terminate
fclose(pFile);
return data; //remember to free(data)
}
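	/* Illustrative usage sketch (an assumption, not part of the original sources): round-trip
	   a small buffer through writeCFast/readCFast above. The file name and element count are
	   made up for the example; as noted above, the buffer returned by readCFast must be
	   released with free(). */
	void exampleRawRoundTrip()
	{
		const size_t count = 16;
		double* out = (double*)malloc(sizeof(double) * count);
		for (size_t i = 0; i < count; i++)
			out[i] = (double)i;
		writeCFast("DataDebug/raw_example.bin", out, count);
		double* in = readCFast("DataDebug/raw_example.bin", count);
		free(out);
		free(in);
	}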
void writeDoubleArrayToBitmapImage(const char* filename, int N, double *r)
{
double max = r[0];
double min = r[0];
for (int i = 0; i < N * N; i++)
{
if (r[i] > max)
max = r[i];
if (r[i] < min)
min = r[i];
}
unsigned char* pixels = new unsigned char[3 * N *N];
for (int i = 0; i < 3 * N *N; i += 3)
//for (int i = 3 * (width * height - 1); i >= 0 ; i -= 3)
{
int greyColor = static_cast<int>(255.0 / (max - min) * (r[i / 3] - min));
pixels[i + 2] = greyColor; //red
pixels[i + 1] = greyColor; //green
pixels[i + 0] = greyColor; //blue
}
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, N, N, 3 * N, 24, 0xFF0000, 0x00FF00, 0x0000FF, false);
FreeImage_FlipVertical(image);
FreeImage_Save(FIF_BMP, image, filename, 0);
FreeImage_Unload(image);
delete[] pixels;
}
void writeNormalsToBitmapImage(const char* filename, int N, const std::vector<std::vector<glm::dvec4>>& normals)
{
glm::dvec4 max = normals[0][0];
glm::dvec4 min = normals[0][0];
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
{
if (normals[r][c].x > max.x)
max.x = normals[r][c].x;
if (normals[r][c].y > max.y)
max.y = normals[r][c].y;
if (normals[r][c].z > max.z)
max.z = normals[r][c].z;
if (normals[r][c].x < min.x)
min.x = normals[r][c].x;
if (normals[r][c].y < min.y)
min.y = normals[r][c].y;
if (normals[r][c].z < min.z)
min.z = normals[r][c].z;
}
}
unsigned char* pixels = new unsigned char[3 * N * N];
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
{
glm::dvec4 normal = normals[r][c];
//255.0 / (max - min) * (r[i / 3] - min)
				pixels[3 * (r*N + c) + 2] = static_cast<int>((255.0 * (0.50 * (normal.x + 1))) + 0.50); //red - x direction
				pixels[3 * (r*N + c) + 1] = static_cast<int>((255.0 * (0.50 * (normal.y + 1))) + 0.50); //green - y direction
				pixels[3 * (r*N + c) + 0] = static_cast<int>((255.0 * (0.50 * (normal.z + 1))) + 0.50); //blue - z direction
}
}
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, N, N, 3 * N, 24, 0xFF0000, 0x00FF00, 0x0000FF, false);
FreeImage_Save(FIF_BMP, image, filename, 0);
FreeImage_Unload(image);
delete[] pixels;
}
void writeNormals(const std::string& path, std::vector<std::vector<glm::dvec4>>& data, int mode)
{
std::ofstream fout(path, mode);
int height = data.size();
int width = data[0].size();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
fout << "\"=\"\"" << data[y][x].x << ":" << data[y][x].y << ":" << data[y][x].z << "\"\"\"" << ",";
}
fout << std::endl;
}
fout.close();
}
std::vector<double> const read(const std::string& path, int NxN)
{
std::ifstream stream(path);
std::string lineOut;
std::vector<double> data;
while (std::getline(stream, lineOut))
{
int start = 0;
int nextComma = 0;
while ((nextComma = lineOut.find(',', start)) != std::string::npos)
{
std::string val = lineOut.substr(start, nextComma - start);
start = nextComma + 1;
data.emplace_back(std::stod(val));
}
}
return data;
}
std::vector<std::vector<glm::dvec4>> generateNormals(double* data, int height, int width)
{
std::vector<std::vector<glm::dvec4>> normals;
int normal_size = (width - 2) * (height - 2);
std::vector<double> dfx;
std::vector<double> dfy;
for (int y = 1; y < height - 1; y++)
{
for (int x = 1; x < width - 1; x++)
{
				dfx.emplace_back((data[(x + 1) + y * width] - data[(x - 1) + y * width]) / 2.0f); //row stride is width, matching data[x + y * width]
				dfy.emplace_back((data[x + (y + 1) * width] - data[x + (y - 1) * width]) / 2.0f);
}
}
double dfx_max = dfx[0];
double dfy_max = dfy[0];
for (int i = 0; i < normal_size; i++)
{
if (dfx[i] > dfx_max)
dfx_max = dfx[i];
if (dfy[i] > dfy_max)
dfy_max = dfy[i];
}
dfx_max = abs(dfx_max);
dfy_max = abs(dfy_max);
double max = (dfx_max > dfy_max) ? dfx_max : dfy_max;
double a = 0.99 / max; //double a = 0.1/max; TODO edit
for (int r = 0; r < height - 2; r++)
{
std::vector<glm::dvec4> row;
normals.emplace_back(row);
for (int c = 0; c < width - 2; c++)
{
				double nx = a * dfx[r*(width - 2) + c]; //each row of dfx/dfy holds (width - 2) entries
				double ny = a * dfy[r*(width - 2) + c];
double nz = sqrt(1.0 - nx * nx - ny * ny);
normals[r].emplace_back(glm::dvec4(nx, ny, nz, 1.0f));
}
}
return normals;
}
}
/*
TODOs:
http://gpgpu.org/static/sc2007/SC07_CUDA_3_Libraries.pdf
convert b/w image to spatial density
	write-to-image values -> understand why
	coloring is wrong -> understand why
	zero padding is wrong
	remove static_cast<int> thing -> nah, it's fine
	put solution on stackoverflow -> yup
	read and write to data file -> yup
	read and write scaled data to bitmap
	make more efficient using shared memory, etc.
	fix up padding correctly, so it adds padding onto the image instead of changing the existing data values of the original image
do const stuff
even faster read write methods
*/
|
Lensing.cu
|
#include "Lensing.cuh"
#include <stdlib.h>
#include <cmath>
#include <fstream>
#include <sstream>
#include <iterator>
#include <GL/glew.h>
#include <iostream>
#include <FreeImage/FreeImage.h>
namespace ginkgo
{
#define DATADEBUG 1
#define ZEROPADDING 0
std::vector<std::vector<glm::dvec4>> lensing()
{
int N =
#if ZEROPADDING
2 *
#endif
			514; // ex: N = 2*32 //future: 128 X 128, N = 128 // N is the side length of the image
int block_size_x = 32;
int block_size_y = 32;
double *kx, *ky, *r;
kx = (double *)malloc(sizeof(double) * N);
ky = (double *)malloc(sizeof(double) * N);
r = (double *)malloc(sizeof(double) * N * N);
double *kx_d, *ky_d, *r_d;
cufftDoubleComplex *r_complex_d;
cudaMalloc((void **)&kx_d, sizeof(double) * N);
cudaMalloc((void **)&ky_d, sizeof(double) * N);
cudaMalloc((void **)&r_d, sizeof(double) * N * N);
cudaMalloc((void **)&r_complex_d, sizeof(cufftDoubleComplex) * N * N);
for (int y = 0; y < N; y++)
for (int x = 0; x < N; x++)
r[x + y * N] = sin(
exp(-((x - N / 2.0f) * (x - N / 2.0f) + (N / 2.0f - y) * (N / 2.0f - y)) / (50 * 50))
);
#if ZEROPADDING
for (int y = 0; y < N; y++)
for (int x = 0; x < N; x++)
if (x < N / 4.0 || x > N * 3.0 / 4.0 || y < N / 4.0 || y > N*3.0 / 4.0)
r[x + y*N] = 0;
#endif
double* r_inital = (double *)malloc(sizeof(double) * N * N);
for (int i = 0; i < N * N; i++)
r_inital[i] = r[i];
for (int i = 0; i < N; i++)
{
kx[i] = i - N / 2.0f; //centers kx values to be at center of image
ky[i] = N / 2.0f - i; //centers ky values to be at center of image
}
#if DATADEBUG
writeDoubleArrayToBitmapImage("DataDebug/SpatialDensity.bmp", N, r);
write("DataDebug/SpatialDensity.csv", r, N, N);
#endif
cudaMemcpy(kx_d, kx, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(ky_d, ky, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(r_d, r, sizeof(double) * N * N, cudaMemcpyHostToDevice);
cufftHandle plan;
		cufftPlan2d(&plan, N, N, CUFFT_Z2Z); //double-precision complex-to-complex plan, matching the cufftExecZ2Z calls below
/* Compute the execution configuration
NB: block_size_x*block_size_y = number of threads */
dim3 dimBlock(block_size_x, block_size_y);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
/* Handle N not multiple of block_size_x or block_size_y */
if (N % block_size_x != 0) dimGrid.x += 1;
if (N % block_size_y != 0) dimGrid.y += 1;
real2complex << < dimGrid, dimBlock >> > (r_complex_d, r_d, N);
cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_FORWARD);
//solve_poisson << <dimGrid, dimBlock >> > (r_complex_d, kx_d, ky_d, N);
cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_INVERSE);
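		//cuFFT transforms are unnormalized: a forward followed by an inverse transform scales the data by N*N, hence the 1/(N*N) factor applied below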
double scale = 1.0f / (N * N);// *2E3;
complex2real_scaled << <dimGrid, dimBlock >> > (r_d, r_complex_d, scale, N);
cudaMemcpy(r, r_d, sizeof(double) * N * N, cudaMemcpyDeviceToHost);
#if DATADEBUG
writeDoubleArrayToBitmapImage("DataDebug/GravitationalPotential.bmp", N, r);
write("DataDebug/GravitationalPotential.csv", r, N, N);
#endif
std::vector<std::vector<glm::dvec4>> normals = generateNormals(r, N, N);
glm::dvec4 normal = normals[N / 2][N / 2];
std::cout << "Center: " << ":\t" << normal.x << " " << normal.y << " " << normal.z << std::endl;
//normals[center] = glm::dvec3(0.0f, 0.0f, 0.0f);
#if DATADEBUG
writeNormalsToBitmapImage("DataDebug/Normals.bmp", N - 2, normals);
writeNormals("DataDebug/Normals.csv", normals);
#endif
//system("pause");
/* Destroy plan and clean up memory on device*/
free(kx);
free(ky);
free(r);
free(r_inital);
cufftDestroy(plan);
		cudaFree(r_complex_d);
		cudaFree(kx_d);
		cudaFree(ky_d);
		cudaFree(r_d);
std::cout << "Normal Size: " << normals.size() << "\n";
return normals;
}
__global__ void real2complex(cufftDoubleComplex *c, double *a, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
c[index].x = a[index];
c[index].y = 0.0f;
}
}
__global__ void solve_poisson(cufftDoubleComplex *c, double *kx, double *ky, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
double scale = -(kx[idx] * kx[idx] + ky[idy] * ky[idy]);
if (idx == N / 2 && idy == N / 2) scale = -1.0f;
scale = 1.0f / scale;
c[index].x *= scale;
c[index].y *= scale;
}
}
__global__ void complex2real_scaled(double *a, cufftDoubleComplex *c, double scale, int N)
{
/* compute idx and idy, the location of the element in the original NxN array */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < N && idy < N)
{
int index = idx + idy * N;
a[index] = scale * c[index].x;
}
}
void writeFast(const std::string& path, double* data, int NxN, int mode)
{
std::ofstream out(path.c_str(), mode);
for (int i = 0; i < NxN; i++)
out.write(reinterpret_cast<char *>(&data[i]), sizeof(double));
out.close();
}
void readFast(const std::string& path, double* data, int NxN, int mode)
{
std::ifstream in(path.c_str(), mode);
for (int i = 0; i < NxN; i++)
in.read(reinterpret_cast<char *>(&data[i]), sizeof(double));
in.close();
}
void write(const std::string& path, double* data, int height, int width, int mode)
{
std::ofstream fout(path, mode);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
fout << data[x + y * width] << ",";
fout << std::endl;
}
fout.close();
}
void write(const std::string& path, std::vector<double>& data, int height, int width, int mode)
{
std::ofstream fout(path, mode);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
fout << data[x + y * width] << ",";
fout << std::endl;
}
fout.close();
}
void writeCFast(const std::string& filename, const double* data, size_t size)
{
FILE* pFile;
pFile = fopen(filename.c_str(), "wb");
for (unsigned long long j = 0; j < 1024; ++j) {
//Some calculations to fill a[]
fwrite(data, 1, size * sizeof(double), pFile);
}
fclose(pFile);
}
double* readCFast(const std::string& filename, size_t size)
{
FILE* pFile;
long lSize;
double* data;
size_t result;
pFile = fopen(filename.c_str(), "rb");
if (pFile == NULL) { fputs("File error in readBOOMFast()", stderr); system("pause"); }
// obtain file size:
fseek(pFile, 0, SEEK_END);
lSize = ftell(pFile);
rewind(pFile);
// allocate memory to contain the whole file:
data = (double*)malloc(sizeof(char)*lSize);
if (data == NULL) { fputs("Memory error in readBOOMFast()", stderr); system("pause"); }
// copy the file into the buffer:
result = fread(data, 1, lSize, pFile);
if (result != lSize) { fputs("Reading error in readBOOMFast()", stderr); system("pause"); }
/* the whole file is now loaded in the memory buffer. */
// terminate
fclose(pFile);
return data; //remember to free(data)
}
void writeDoubleArrayToBitmapImage(const char* filename, int N, double *r)
{
double max = r[0];
double min = r[0];
for (int i = 0; i < N * N; i++)
{
if (r[i] > max)
max = r[i];
if (r[i] < min)
min = r[i];
}
unsigned char* pixels = new unsigned char[3 * N *N];
for (int i = 0; i < 3 * N *N; i += 3)
//for (int i = 3 * (width * height - 1); i >= 0 ; i -= 3)
{
int greyColor = static_cast<int>(255.0 / (max - min) * (r[i / 3] - min));
pixels[i + 2] = greyColor; //red
pixels[i + 1] = greyColor; //green
pixels[i + 0] = greyColor; //blue
}
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, N, N, 3 * N, 24, 0xFF0000, 0x00FF00, 0x0000FF, false);
FreeImage_FlipVertical(image);
FreeImage_Save(FIF_BMP, image, filename, 0);
FreeImage_Unload(image);
delete[] pixels;
}
void writeNormalsToBitmapImage(const char* filename, int N, const std::vector<std::vector<glm::dvec4>>& normals)
{
glm::dvec4 max = normals[0][0];
glm::dvec4 min = normals[0][0];
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
{
if (normals[r][c].x > max.x)
max.x = normals[r][c].x;
if (normals[r][c].y > max.y)
max.y = normals[r][c].y;
if (normals[r][c].z > max.z)
max.z = normals[r][c].z;
if (normals[r][c].x < min.x)
min.x = normals[r][c].x;
if (normals[r][c].y < min.y)
min.y = normals[r][c].y;
if (normals[r][c].z < min.z)
min.z = normals[r][c].z;
}
}
unsigned char* pixels = new unsigned char[3 * N * N];
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
{
glm::dvec4 normal = normals[r][c];
//255.0 / (max - min) * (r[i / 3] - min)
				pixels[3 * (r*N + c) + 2] = static_cast<int>((255.0 * (0.50 * (normal.x + 1))) + 0.50); //red - x direction
				pixels[3 * (r*N + c) + 1] = static_cast<int>((255.0 * (0.50 * (normal.y + 1))) + 0.50); //green - y direction
				pixels[3 * (r*N + c) + 0] = static_cast<int>((255.0 * (0.50 * (normal.z + 1))) + 0.50); //blue - z direction
}
}
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, N, N, 3 * N, 24, 0xFF0000, 0x00FF00, 0x0000FF, false);
FreeImage_Save(FIF_BMP, image, filename, 0);
FreeImage_Unload(image);
delete[] pixels;
}
void writeNormals(const std::string& path, std::vector<std::vector<glm::dvec4>>& data, int mode)
{
std::ofstream fout(path, mode);
int height = data.size();
int width = data[0].size();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
fout << "\"=\"\"" << data[y][x].x << ":" << data[y][x].y << ":" << data[y][x].z << "\"\"\"" << ",";
}
fout << std::endl;
}
fout.close();
}
std::vector<double> const read(const std::string& path, int NxN)
{
std::ifstream stream(path);
std::string lineOut;
std::vector<double> data;
while (std::getline(stream, lineOut))
{
int start = 0;
int nextComma = 0;
while ((nextComma = lineOut.find(',', start)) != std::string::npos)
{
std::string val = lineOut.substr(start, nextComma - start);
start = nextComma + 1;
data.emplace_back(std::stod(val));
}
}
return data;
}
std::vector<std::vector<glm::dvec4>> generateNormals(double* data, int height, int width)
{
std::vector<std::vector<glm::dvec4>> normals;
int normal_size = (width - 2) * (height - 2);
std::vector<double> dfx;
std::vector<double> dfy;
for (int y = 1; y < height - 1; y++)
{
for (int x = 1; x < width - 1; x++)
{
				dfx.emplace_back((data[(x + 1) + y * width] - data[(x - 1) + y * width]) / 2.0f); //row stride is width, matching data[x + y * width]
				dfy.emplace_back((data[x + (y + 1) * width] - data[x + (y - 1) * width]) / 2.0f);
}
}
double dfx_max = dfx[0];
double dfy_max = dfy[0];
for (int i = 0; i < normal_size; i++)
{
if (dfx[i] > dfx_max)
dfx_max = dfx[i];
if (dfy[i] > dfy_max)
dfy_max = dfy[i];
}
dfx_max = abs(dfx_max);
dfy_max = abs(dfy_max);
double max = (dfx_max > dfy_max) ? dfx_max : dfy_max;
double a = 0.99 / max; //double a = 0.1/max; TODO edit
for (int r = 0; r < height - 2; r++)
{
std::vector<glm::dvec4> row;
normals.emplace_back(row);
for (int c = 0; c < width - 2; c++)
{
				double nx = a * dfx[r*(width - 2) + c]; //each row of dfx/dfy holds (width - 2) entries
				double ny = a * dfy[r*(width - 2) + c];
double nz = sqrt(1.0 - nx * nx - ny * ny);
normals[r].emplace_back(glm::dvec4(nx, ny, nz, 1.0f));
}
}
return normals;
}
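	/* Illustrative sketch (an assumption, not part of the original sources): check that a
	   normal produced by generateNormals above is approximately unit length. Since nz is
	   chosen as sqrt(1.0 - nx*nx - ny*ny), any finite normal should pass, while a NaN
	   (possible when nx*nx + ny*ny exceeds 1) will not. */
	bool isUnitNormal(const glm::dvec4& normal, double eps = 1e-9)
	{
		double len2 = normal.x * normal.x + normal.y * normal.y + normal.z * normal.z;
		return fabs(len2 - 1.0) < eps;
	}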
}
/*
TODOs:
http://gpgpu.org/static/sc2007/SC07_CUDA_3_Libraries.pdf
convert b/w image to spatial density
	write-to-image values -> understand why
	coloring is wrong -> understand why
	zero padding is wrong
	remove static_cast<int> thing -> nah, it's fine
	put solution on stackoverflow -> yup
	read and write to data file -> yup
	read and write scaled data to bitmap
	make more efficient using shared memory, etc.
	fix up padding correctly, so it adds padding onto the image instead of changing the existing data values of the original image
do const stuff
even faster read write methods
*/
|
881b12e1eaf0e60dafa49b16a1fe22f78b275f5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "image_template.h"
#include <math.h>
#include <sys/time.h>
//A consolidated function that creates both the Gaussian kernel and the derivative kernel
void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma)
{
int i,j;
float sum=0;
int a=k_width/2;
*gaussian_kernel=(float *)malloc(sizeof(float)*k_width);
*gaussian_deriv=(float *)malloc(sizeof(float)*k_width);
//Create kernel
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum+=(*gaussian_kernel)[i];
}
for(i=0;i<k_width;i++)
(*gaussian_kernel)[i]/=sum;
//Create derivative
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum-=i*((*gaussian_deriv)[i]);
}
for(i=0;i<k_width;i++)
(*gaussian_deriv)[i]/=sum;
}
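/* Illustrative usage sketch (an assumption, not part of the original program):
   build the Gaussian smoothing mask and its derivative mask for sigma = 1.0 the
   same way main() does below, then release them. */
void example_create_gaussians(void)
{
	float sigma = 1.0f;
	int a = (int)ceilf(2.5f * sigma - 0.5f); //mask half-width: a = 2 for sigma = 1.0
	int k_width = 2 * a + 1; //full (odd) mask width: 5
	float *gaussian_kernel = NULL, *gaussian_deriv = NULL;
	create_gaussians(&gaussian_kernel, &gaussian_deriv, k_width, sigma);
	//gaussian_kernel[] sums to 1; gaussian_deriv[] holds the normalized derivative mask
	free(gaussian_kernel);
	free(gaussian_deriv);
}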
//A GPU kernel for convolution. Input image can be either int or float BUT the output is always float
__global__
void convolve(float *in_image,int width,int height,float *mask,int mask_width,int mask_height,float *out_image)
{
int i,j,k,m;
float sum;
int offseti,offsetj;
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
if(i<height && j <width)
{
sum=0;
for(k=0;k<mask_height;k++)
{
for(m=0;m<mask_width;m++)
{
offseti= -1*(mask_height/2)+k;
offsetj= -1*(mask_width/2)+m;
if(i+offseti >=0 && i+offseti<height && j+offsetj>=0 && j+offsetj<width)
sum+=(float)(in_image[(i+offseti)*width+(j+offsetj)])*mask[k*mask_width+m];
}
}
out_image[i*width+j]=(float)sum;
}
}
int main(int argc, char **argv)
{
//Declare all of the variable here
float *org_img;
//GPU device buffer for original image
float *d_org_img;
//CPU host buffers for the final output
float *vertical_gradient,*horizontal_gradient;
//GPU buffers for the final result
float *d_vertical_gradient,*d_horizontal_gradient;
//GPU buffers to hold intermediate convolution results
float *d_temp_horizontal,*d_temp_vertical;
//CPU host buffers to store the convolution masks
float *gaussian_kernel,*gaussian_deriv;
//GPU device buffers to store the convolution masks
float *d_gaussian_kernel,*d_gaussian_deriv;
int width,height,k_width;
float sigma,a;
struct timeval start,end;
if(argc!=3)
{
printf("\n The correct argument list is: exec <image file> <Sigma> \n");
exit(0);
}
//obtain the parameters
sigma=atof(argv[2]);
a=ceil((float)(2.5*sigma-0.5));
k_width=2*a+1;
//CPU portion of the code that reads/prepares the input data
read_image_template<float>(argv[1],&org_img,&width,&height);
//Computation starts here
gettimeofday(&start,NULL);
create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma);
horizontal_gradient=(float *)malloc(sizeof(float)*width*height);
vertical_gradient=(float *)malloc(sizeof(float)*width*height);
// CPU host mallocs for GPU buffers
hipMalloc((void **)&d_org_img,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height);
hipMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height);
hipMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width);
hipMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width);
//Offload all of the data to GPU device for convolution
hipMemcpy(d_org_img,org_img,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,hipMemcpyHostToDevice);
//Horizontal gradient. vertical kernel then horizontal derivative
int block_dim=16;
	dim3 dimGrid((height + block_dim - 1) / block_dim, (width + block_dim - 1) / block_dim, 1); //integer ceiling division so border pixels are covered
dim3 dimBlock(block_dim,block_dim,1);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_org_img,width,height,d_gaussian_kernel,1,k_width,d_temp_horizontal);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_temp_horizontal,width,height,d_gaussian_deriv,k_width,1,d_horizontal_gradient);
//Vertical gradient. horizontal kernel then vertical derivative
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_org_img,width,height,d_gaussian_kernel,k_width,1,d_temp_vertical);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_temp_vertical,width,height,d_gaussian_deriv,1,k_width,d_vertical_gradient);
//GPU to Host transfer of the final result
hipMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
gettimeofday(&end,NULL);
printf("%d, %ld\n", width, ((end.tv_sec * 1000 + end.tv_usec/1000)
- (start.tv_sec * 1000 + start.tv_usec/1000)));
free(org_img);
free(horizontal_gradient);
free(vertical_gradient);
free(gaussian_kernel);
free(gaussian_deriv);
hipFree(d_org_img);
hipFree(d_gaussian_kernel);
hipFree(d_gaussian_deriv);
hipFree(d_temp_horizontal);
hipFree(d_temp_vertical);
hipFree(d_vertical_gradient);
hipFree(d_horizontal_gradient);
return 0;
}
|
881b12e1eaf0e60dafa49b16a1fe22f78b275f5b.cu
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include "image_template.h"
#include <math.h>
#include <sys/time.h>
//A consolidated function that creates both the Gaussian kernel and the derivative kernel
void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma)
{
int i,j;
float sum=0;
int a=k_width/2;
*gaussian_kernel=(float *)malloc(sizeof(float)*k_width);
*gaussian_deriv=(float *)malloc(sizeof(float)*k_width);
//Create kernel
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum+=(*gaussian_kernel)[i];
}
for(i=0;i<k_width;i++)
(*gaussian_kernel)[i]/=sum;
//Create derivative
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum-=i*((*gaussian_deriv)[i]);
}
for(i=0;i<k_width;i++)
(*gaussian_deriv)[i]/=sum;
}
//A GPU kernel for convolution. Input image can be either int or float BUT the output is always float
__global__
void convolve(float *in_image,int width,int height,float *mask,int mask_width,int mask_height,float *out_image)
{
int i,j,k,m;
float sum;
int offseti,offsetj;
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
if(i<height && j <width)
{
sum=0;
for(k=0;k<mask_height;k++)
{
for(m=0;m<mask_width;m++)
{
offseti= -1*(mask_height/2)+k;
offsetj= -1*(mask_width/2)+m;
if(i+offseti >=0 && i+offseti<height && j+offsetj>=0 && j+offsetj<width)
sum+=(float)(in_image[(i+offseti)*width+(j+offsetj)])*mask[k*mask_width+m];
}
}
out_image[i*width+j]=(float)sum;
}
}
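/* Illustrative helper (an assumption, not part of the original program): integer ceiling
   division for sizing the launch grid, so partially filled blocks at the image border are
   still covered. It matches the grid-size computation used for dimGrid in main() below. */
static inline int div_up(int n, int block)
{
	return (n + block - 1) / block;
}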
int main(int argc, char **argv)
{
//Declare all of the variable here
float *org_img;
//GPU device buffer for original image
float *d_org_img;
//CPU host buffers for the final output
float *vertical_gradient,*horizontal_gradient;
//GPU buffers for the final result
float *d_vertical_gradient,*d_horizontal_gradient;
//GPU buffers to hold intermediate convolution results
float *d_temp_horizontal,*d_temp_vertical;
//CPU host buffers to store the convolution masks
float *gaussian_kernel,*gaussian_deriv;
//GPU device buffers to store the convolution masks
float *d_gaussian_kernel,*d_gaussian_deriv;
int width,height,k_width;
float sigma,a;
struct timeval start,end;
if(argc!=3)
{
printf("\n The correct argument list is: exec <image file> <Sigma> \n");
exit(0);
}
//obtain the parameters
sigma=atof(argv[2]);
a=ceil((float)(2.5*sigma-0.5));
k_width=2*a+1;
//CPU portion of the code that reads/prepares the input data
read_image_template<float>(argv[1],&org_img,&width,&height);
//Computation starts here
gettimeofday(&start,NULL);
create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma);
horizontal_gradient=(float *)malloc(sizeof(float)*width*height);
vertical_gradient=(float *)malloc(sizeof(float)*width*height);
// CPU host mallocs for GPU buffers
cudaMalloc((void **)&d_org_img,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height);
cudaMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width);
cudaMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width);
//Offload all of the data to GPU device for convolution
cudaMemcpy(d_org_img,org_img,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,cudaMemcpyHostToDevice);
//Horizontal gradient. vertical kernel then horizontal derivative
int block_dim=16;
	dim3 dimGrid((height + block_dim - 1) / block_dim, (width + block_dim - 1) / block_dim, 1); //integer ceiling division so border pixels are covered
dim3 dimBlock(block_dim,block_dim,1);
convolve<<<dimGrid,dimBlock>>>(d_org_img,width,height,d_gaussian_kernel,1,k_width,d_temp_horizontal);
convolve<<<dimGrid,dimBlock>>>(d_temp_horizontal,width,height,d_gaussian_deriv,k_width,1,d_horizontal_gradient);
//Vertical gradient. horizontal kernel then vertical derivative
convolve<<<dimGrid,dimBlock>>>(d_org_img,width,height,d_gaussian_kernel,k_width,1,d_temp_vertical);
convolve<<<dimGrid,dimBlock>>>(d_temp_vertical,width,height,d_gaussian_deriv,1,k_width,d_vertical_gradient);
//GPU to Host transfer of the final result
cudaMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
gettimeofday(&end,NULL);
printf("%d, %ld\n", width, ((end.tv_sec * 1000 + end.tv_usec/1000)
- (start.tv_sec * 1000 + start.tv_usec/1000)));
free(org_img);
free(horizontal_gradient);
free(vertical_gradient);
free(gaussian_kernel);
free(gaussian_deriv);
cudaFree(d_org_img);
cudaFree(d_gaussian_kernel);
cudaFree(d_gaussian_deriv);
cudaFree(d_temp_horizontal);
cudaFree(d_temp_vertical);
cudaFree(d_vertical_gradient);
cudaFree(d_horizontal_gradient);
return 0;
}
|
6314a5173935f78d62d98e42a1c46852c2095748.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <functions/logisticReg.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LogRegLossTest : public ::testing::TestWithParam<LogRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {0.38752545};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso,
penalty::L1, alpha, l1_ratio, cublas_handle, allocator,
stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge,
penalty::L2, alpha, l1_ratio, cublas_handle, allocator,
stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LogRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LogRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
6314a5173935f78d62d98e42a1c46852c2095748.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <functions/logisticReg.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LogRegLossTest : public ::testing::TestWithParam<LogRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {0.38752545};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso,
penalty::L1, alpha, l1_ratio, cublas_handle, allocator,
stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge,
penalty::L2, alpha, l1_ratio, cublas_handle, allocator,
stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio,
cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
cublas_handle, allocator, stream);
logisticRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, cublas_handle, allocator, stream);
raft::update_device(in, h_in, len, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LogRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LogRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
5841d75e8141037dabebe73196565fb367d8737c.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
5841d75e8141037dabebe73196565fb367d8737c.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
73857bbb296606f430af0b46cd3429508351286f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include <cub/hipcub/hipcub.hpp>
#include <memory>
namespace CudaUtils {
/** this function is used to modify the elements.
 * @param keys_ptr, the pointer to the 2-D keys tensor, whose shape is
* [batchsize * slot_num, max_nnz]
* @param fn, how to modify each element. input element, output its modified value.
* @param elem_size, how many elements.
*/
template <typename input_type, typename Func>
__global__ void modify_elements(input_type* keys_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
keys_ptr[i] = fn(keys_ptr[i]);
}
}
template <typename Func, typename input_type, typename output_type>
__global__ void modify_elements(const input_type* input_ptr, output_type* output_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
output_ptr[i] = fn(input_ptr[i]);
}
}
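/** Illustrative usage sketch (an assumption, not part of the original API): shift every key
 * by a constant offset with the first modify_elements kernel above. The device lambda uses
 * the same extended-lambda pattern this file already relies on in distributed_binary_vector
 * below.
 */
void shift_keys(long long* keys_ptr, const long long offset, const size_t elem_size,
                hipStream_t stream) {
  int block_dim = 128;
  int grid_dim = (elem_size + block_dim - 1) / block_dim;
  auto fn = [offset] __device__(const long long key) -> long long { return key + offset; };
  hipLaunchKernelGGL(( modify_elements), dim3(grid_dim), dim3(block_dim), 0, stream, keys_ptr, fn, elem_size);
}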
template <typename Func, typename input_type>
__global__ void binary_vector(const input_type* input, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
Func fn, bool* binary_out, const size_t slot_num = 0) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
binary_out[i] = (fn(input[i], gpu_count, dev_id, slot_num) ? true : false);
}
}
template <typename T>
__global__ void localized_new_row_indices(const T* row_indices, T* dev_row_indices,
const size_t slot_num, const size_t dev_slot_num,
const size_t gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
T batch_idx = 0, slot_idx = 0, dev_slot_idx = 0;
for (size_t i = gid; i < elem_size; i += strid){
batch_idx = row_indices[i] / slot_num;
slot_idx = row_indices[i] % slot_num;
dev_slot_idx = slot_idx / gpu_count;
dev_row_indices[i] = static_cast<T>(batch_idx * dev_slot_num + dev_slot_idx);
}
}
template <typename T>
__global__ void kernel_print(T value) {
printf("%d\n", value);
}
template <typename T>
void kernel_print(T value, hipStream_t stream) {
hipLaunchKernelGGL(( kernel_print), dim3(1), dim3(1), 0, stream, value);
}
template <typename input_type>
void distributed_binary_vector(const input_type* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, hipStream_t stream) {
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(const input_type value, const size_t gpu_count,
const size_t dev_id, const size_t slot_num) -> bool {
return ((dev_id == value % gpu_count) ? true : false);
};
hipLaunchKernelGGL(( binary_vector), dim3(grid_dim), dim3(block_dim), 0, stream, input_values, elem_size, gpu_count, dev_id, fn, binary_out);
}
template <typename input_type>
void localized_binary_vector(const input_type* input_row_indices, const size_t elem_size,
const size_t gpu_count, const size_t dev_id, const size_t slot_num,
bool* binary_out, hipStream_t stream){
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(const input_type row_indice, const size_t gpu_count,
const size_t dev_id, const size_t slot_num) -> bool {
input_type slot_idx = row_indice % slot_num;
return ((dev_id == slot_idx % gpu_count) ? true : false);
};
hipLaunchKernelGGL(( binary_vector), dim3(grid_dim), dim3(block_dim), 0, stream, input_row_indices, elem_size, gpu_count, dev_id, fn, binary_out, slot_num);
}
template <typename T>
void localized_new_row_indices(const T* row_indices, T* dev_row_indices, const size_t slot_num,
const size_t dev_slot_num, const size_t gpu_count, const size_t elem_size,
hipStream_t stream) {
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( localized_new_row_indices), dim3(grid_dim), dim3(block_dim), 0, stream, row_indices, dev_row_indices, slot_num,
dev_slot_num, gpu_count, elem_size);
}
template <typename input_type, typename output_type>
void cast_elements(const input_type* input_ptr, output_type* output_ptr, const size_t elem_size,
const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto cast_fn = [] __device__ (input_type num) -> output_type { return static_cast<output_type>(num); };
hipLaunchKernelGGL(( modify_elements), dim3(grid_dim), dim3(block_dim), 0, stream, input_ptr, output_ptr, cast_fn, elem_size);
}
/*round number up to the nearest multiple of base, e.g. num_roof(1000, 128) == 1024*/
size_t num_roof(const size_t number, const size_t base) {
return ((number + base - 1) / base) * base;
}
/*wrapper of hipcub::DeviceSelect::Flagged*/
template <typename input_type, typename flag_type, typename output_type>
hipError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, input_type* d_in,
flag_type* d_flags, output_type* d_out, size_t* d_num_selected_out,
int num_items, hipStream_t stream, bool debug_synchronous) {
return hipcub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
d_in,
d_flags,
d_out,
d_num_selected_out,
num_items,
stream,
debug_synchronous);
}
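/* Illustrative usage sketch (an assumption, not part of the original API): the usual two-pass
   CUB pattern for the cub_flagged wrapper above. The first call passes a null temporary buffer
   so only the required workspace size is written; the buffer is then allocated and the
   selection is run for real. */
hipError_t cub_flagged_example(long long* d_in, bool* d_flags, long long* d_out,
                               size_t* d_num_selected_out, int num_items,
                               hipStream_t stream) {
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  // pass 1: query the required temporary storage size
  hipError_t error = cub_flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out,
                                 d_num_selected_out, num_items, stream, false);
  if (error != hipSuccess) return error;
  error = hipMalloc(&d_temp_storage, temp_storage_bytes);
  if (error != hipSuccess) return error;
  // pass 2: perform the selection; *d_num_selected_out receives the number of kept items
  error = cub_flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out,
                      d_num_selected_out, num_items, stream, false);
  hipFree(d_temp_storage);
  return error;
}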
template <typename T>
T* CudaAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
T* CudaHostAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
void CudaAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
hipFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void CudaHostAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
hipHostFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void print_cuda_ptr(T* dev_ptr, const size_t elem_size) {
hipError_t error = hipDeviceSynchronize();
if (error != hipSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipGetErrorString(error) << std::endl;
exit(-1);
}
std::unique_ptr<T []> host_vector(new T[elem_size]());
error = hipMemcpy(host_vector.get(), dev_ptr, sizeof(T) * elem_size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipGetErrorString(error) << std::endl;
exit(-1);
}
for (size_t i = 0; i < elem_size; ++i) {
std::cout << host_vector[i] << ", " << std::flush;
}
std::cout << std::endl;
return;
}
template class CudaAllocator<int*>;
template class CudaAllocator<long long*>;
template class CudaAllocator<char*>;
template class CudaAllocator<unsigned int*>;
template class CudaAllocator<bool*>;
template class CudaAllocator<size_t*>;
template class CudaAllocator<void*>;
template class CudaHostAllocator<size_t*>;
template hipError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, long long* d_in,
bool* d_flags, int* d_out, size_t* d_num_selected_out,
int num_items, hipStream_t stream, bool debug_synchronous);
template hipError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, long long* d_in,
bool* d_flags, long long* d_out, size_t* d_num_selected_out,
int num_items, hipStream_t stream, bool debug_synchronous);
template hipError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, unsigned int* d_in,
bool* d_flags, unsigned int* d_out, size_t* d_num_selected_out,
int num_items, hipStream_t stream, bool debug_synchronous);
template void localized_new_row_indices(const int* row_indices, int* dev_row_indices, const size_t slot_num,
const size_t dev_slot_num, const size_t gpu_count, const size_t elem_size,
hipStream_t stream);
template void distributed_binary_vector(const long long* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, hipStream_t stream);
template void distributed_binary_vector(const unsigned int* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, hipStream_t stream);
template void localized_binary_vector(const long long* input_row_indices, const size_t elem_size,
const size_t gpu_count, const size_t dev_id, const size_t slot_num,
bool* binary_out, hipStream_t stream);
template void cast_elements(const int*, long long*, const size_t, const size_t, hipStream_t);
template void cast_elements(const int*, unsigned int*, const size_t, const size_t, hipStream_t);
template void kernel_print(size_t value, hipStream_t stream);
template void print_cuda_ptr(long long *, const size_t);
template void print_cuda_ptr(unsigned int *, const size_t);
template void print_cuda_ptr(int *, const size_t);
template void print_cuda_ptr(bool *, const size_t);
} // namespace CudaUtils
|
73857bbb296606f430af0b46cd3429508351286f.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include <cub/cub/cub.cuh>
#include <memory>
namespace CudaUtils {
/** this function is used to modify the elements in place.
 * @param keys_ptr, pointer to the 2-D keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param fn, how to modify each element: it takes an element and returns its modified value.
 * @param elem_size, how many elements there are.
 */
template <typename input_type, typename Func>
__global__ void modify_elements(input_type* keys_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
keys_ptr[i] = fn(keys_ptr[i]);
}
}
template <typename Func, typename input_type, typename output_type>
__global__ void modify_elements(const input_type* input_ptr, output_type* output_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
output_ptr[i] = fn(input_ptr[i]);
}
}
template <typename Func, typename input_type>
__global__ void binary_vector(const input_type* input, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
Func fn, bool* binary_out, const size_t slot_num = 0) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
binary_out[i] = (fn(input[i], gpu_count, dev_id, slot_num) ? true : false);
}
}
template <typename T>
__global__ void localized_new_row_indices(const T* row_indices, T* dev_row_indices,
const size_t slot_num, const size_t dev_slot_num,
const size_t gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
T batch_idx = 0, slot_idx = 0, dev_slot_idx = 0;
for (size_t i = gid; i < elem_size; i += strid){
batch_idx = row_indices[i] / slot_num;
slot_idx = row_indices[i] % slot_num;
dev_slot_idx = slot_idx / gpu_count;
dev_row_indices[i] = static_cast<T>(batch_idx * dev_slot_num + dev_slot_idx);
}
}
template <typename T>
__global__ void kernel_print(T value) {
printf("%d\n", value);
}
template <typename T>
void kernel_print(T value, cudaStream_t stream) {
kernel_print<<<1, 1, 0, stream>>>(value);
}
template <typename input_type>
void distributed_binary_vector(const input_type* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, cudaStream_t stream) {
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(const input_type value, const size_t gpu_count,
const size_t dev_id, const size_t slot_num) -> bool {
return ((dev_id == value % gpu_count) ? true : false);
};
binary_vector<<<grid_dim, block_dim, 0, stream>>>(input_values, elem_size, gpu_count, dev_id, fn, binary_out);
}
template <typename input_type>
void localized_binary_vector(const input_type* input_row_indices, const size_t elem_size,
const size_t gpu_count, const size_t dev_id, const size_t slot_num,
bool* binary_out, cudaStream_t stream){
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(const input_type row_indice, const size_t gpu_count,
const size_t dev_id, const size_t slot_num) -> bool {
input_type slot_idx = row_indice % slot_num;
return ((dev_id == slot_idx % gpu_count) ? true : false);
};
binary_vector<<<grid_dim, block_dim, 0, stream>>>(input_row_indices, elem_size, gpu_count, dev_id, fn, binary_out, slot_num);
}
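/* Worked example of the two masks above (numbers chosen for illustration only):
 * distributed_binary_vector keeps key i on the device whose id equals key % gpu_count, e.g.
 * with gpu_count = 2 and dev_id = 1, keys {3, 4, 7, 10} give the mask {1, 0, 1, 0}.
 * localized_binary_vector partitions by slot instead: slot_idx = row_index % slot_num is kept
 * where dev_id == slot_idx % gpu_count, e.g. with slot_num = 3, gpu_count = 2 and dev_id = 0,
 * row indices {0, 1, 2, 3, 4, 5} give slots {0, 1, 2, 0, 1, 2} and the mask {1, 0, 1, 1, 0, 1}. */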
template <typename T>
void localized_new_row_indices(const T* row_indices, T* dev_row_indices, const size_t slot_num,
const size_t dev_slot_num, const size_t gpu_count, const size_t elem_size,
cudaStream_t stream) {
int block_dim = 128;
int grid_dim = (elem_size + block_dim - 1) / block_dim;
localized_new_row_indices<<<grid_dim, block_dim, 0, stream>>>(row_indices, dev_row_indices, slot_num,
dev_slot_num, gpu_count, elem_size);
}
template <typename input_type, typename output_type>
void cast_elements(const input_type* input_ptr, output_type* output_ptr, const size_t elem_size,
const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto cast_fn = [] __device__ (input_type num) -> output_type { return static_cast<output_type>(num); };
modify_elements<<<grid_dim, block_dim, 0, stream>>>(input_ptr, output_ptr, cast_fn, elem_size);
}
/*round number up to the nearest multiple of base*/
size_t num_roof(const size_t number, const size_t base) {
return ((number + base - 1) / base) * base;
}
/*wrapper of cub::DeviceSelect*/
template <typename input_type, typename flag_type, typename output_type>
cudaError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, input_type* d_in,
flag_type* d_flags, output_type* d_out, size_t* d_num_selected_out,
int num_items, cudaStream_t stream, bool debug_synchronous) {
return cub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
d_in,
d_flags,
d_out,
d_num_selected_out,
num_items,
stream,
debug_synchronous);
}
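/* A usage sketch of the wrapper above (illustrative only; the function name and buffer names
 * are hypothetical, and the buffers are assumed to be valid device pointers prepared by the
 * caller): cub::DeviceSelect::Flagged follows the usual CUB two-pass protocol, a first call
 * with a null temporary buffer to query temp_storage_bytes, then a second call with the
 * allocated scratch space to do the actual compaction. */
template <typename key_type>
cudaError_t cub_flagged_two_pass(key_type* d_keys, bool* d_mask, key_type* d_selected,
                                 size_t* d_num_selected, int num_items, cudaStream_t stream) {
    size_t temp_storage_bytes = 0;
    // first pass: only computes the required temporary storage size
    cudaError_t error = cub_flagged(static_cast<void*>(nullptr), temp_storage_bytes, d_keys,
                                    d_mask, d_selected, d_num_selected, num_items, stream, false);
    if (error != cudaSuccess) return error;
    void* d_temp_storage = nullptr;
    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;
    // second pass: performs the stream compaction into d_selected
    error = cub_flagged(d_temp_storage, temp_storage_bytes, d_keys, d_mask,
                        d_selected, d_num_selected, num_items, stream, false);
    cudaFree(d_temp_storage);
    return error;
}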
template <typename T>
T* CudaAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
T* CudaHostAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
void CudaAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
cudaFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void CudaHostAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
cudaFreeHost(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void print_cuda_ptr(T* dev_ptr, const size_t elem_size) {
cudaError_t error = cudaDeviceSynchronize();
if (error != cudaSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cudaGetErrorString(error) << std::endl;
exit(-1);
}
std::unique_ptr<T []> host_vector(new T[elem_size]());
error = cudaMemcpy(host_vector.get(), dev_ptr, sizeof(T) * elem_size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cudaGetErrorString(error) << std::endl;
exit(-1);
}
for (size_t i = 0; i < elem_size; ++i) {
std::cout << host_vector[i] << ", " << std::flush;
}
std::cout << std::endl;
return;
}
template class CudaAllocator<int*>;
template class CudaAllocator<long long*>;
template class CudaAllocator<char*>;
template class CudaAllocator<unsigned int*>;
template class CudaAllocator<bool*>;
template class CudaAllocator<size_t*>;
template class CudaAllocator<void*>;
template class CudaHostAllocator<size_t*>;
template cudaError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, long long* d_in,
bool* d_flags, int* d_out, size_t* d_num_selected_out,
int num_items, cudaStream_t stream, bool debug_synchronous);
template cudaError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, long long* d_in,
bool* d_flags, long long* d_out, size_t* d_num_selected_out,
int num_items, cudaStream_t stream, bool debug_synchronous);
template cudaError_t cub_flagged(void* d_temp_storage, size_t& temp_storage_bytes, unsigned int* d_in,
bool* d_flags, unsigned int* d_out, size_t* d_num_selected_out,
int num_items, cudaStream_t stream, bool debug_synchronous);
template void localized_new_row_indices(const int* row_indices, int* dev_row_indices, const size_t slot_num,
const size_t dev_slot_num, const size_t gpu_count, const size_t elem_size,
cudaStream_t stream);
template void distributed_binary_vector(const long long* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, cudaStream_t stream);
template void distributed_binary_vector(const unsigned int* input_values, const size_t elem_size,
const size_t gpu_count, const size_t dev_id,
bool* binary_out, cudaStream_t stream);
template void localized_binary_vector(const long long* input_row_indices, const size_t elem_size,
const size_t gpu_count, const size_t dev_id, const size_t slot_num,
bool* binary_out, cudaStream_t stream);
template void cast_elements(const int*, long long*, const size_t, const size_t, cudaStream_t);
template void cast_elements(const int*, unsigned int*, const size_t, const size_t, cudaStream_t);
template void kernel_print(size_t value, cudaStream_t stream);
template void print_cuda_ptr(long long *, const size_t);
template void print_cuda_ptr(unsigned int *, const size_t);
template void print_cuda_ptr(int *, const size_t);
template void print_cuda_ptr(bool *, const size_t);
} // namespace CudaUtils
|
2aca058da238b16e77d29daf206d51b22d07465e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* FullyDiscreteKurganovTadmorScheme.cu
*
* Created on: Oct 23, 2015
* Author: bazow
*/
#include <stdlib.h>
#include <stdio.h> // for printf
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "edu/osu/rhic/trunk/hydro/FullyDiscreteKurganovTadmorScheme.cuh"
#include "edu/osu/rhic/harness/lattice/LatticeParameters.h"
#include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh"
#include "edu/osu/rhic/trunk/hydro/GhostCells.cuh"
#include "edu/osu/rhic/core/muscl/SemiDiscreteKurganovTadmorScheme.cuh"
#include "edu/osu/rhic/core/muscl/HalfSiteExtrapolation.cuh"
#include "edu/osu//rhic/trunk/hydro/FluxFunctions.cuh"
#include "edu/osu//rhic/trunk/hydro/SpectralRadius.cuh"
#include "edu/osu/rhic/trunk/hydro/SourceTerms.cuh"
#include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh"
#include "edu/osu/rhic/harness/init/CudaConfiguration.cuh"
#include "edu/osu/rhic/trunk/hydro/RegulateDissipativeCurrents.cuh"
#include "edu/osu/rhic/trunk/hydro/EulerStep.cuh"
#include "edu/osu/rhic/trunk/hydro/HydrodynamicValidity.cuh"
//#define EULER_STEP_FUSED
//#define EULER_STEP_FUSED_1D
//#define EULER_STEP_SPLIT
//#define EULER_STEP_SMEM
#define EULER_STEP_SPLIT_1D
void eulerStep(PRECISION t, const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p, const FLUID_VELOCITY * const __restrict__ u,
const FLUID_VELOCITY * const __restrict__ up) {
#if defined EULER_STEP_FUSED
hipLaunchKernelGGL(( eulerStepKernel), dim3(GF), dim3(BF), 0, 0, t, currrentVars, updatedVars, e, p, u, up);
#elif defined EULER_STEP_FUSED_1D
hipLaunchKernelGGL(( eulerStepKernel_1D), dim3(grid_fused_1D), dim3(block_fused_1D), 0, 0, t, currrentVars, updatedVars, e, p, u, up);
#elif defined EULER_STEP_SPLIT
hipLaunchKernelGGL(( eulerStepKernelSource), dim3(grid), dim3(block), 0, 0, t, currrentVars, updatedVars, e, p, u, up);
hipLaunchKernelGGL(( eulerStepKernelX), dim3(grid_X), dim3(block_X), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelY), dim3(grid_Y), dim3(block_Y), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelZ), dim3(grid_Z), dim3(block_Z), 0, 0, t, currrentVars, updatedVars, u, e);
#elif defined EULER_STEP_SMEM
hipLaunchKernelGGL(( eulerStepKernelSource), dim3(grid), dim3(block), 0, 0, t, currrentVars, updatedVars, e, p, u, up);
hipLaunchKernelGGL(( eulerStepKernelSharedX), dim3(GSX), dim3(BSX), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelSharedY), dim3(GSY), dim3(BSY), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelSharedZ), dim3(GSZ), dim3(BSZ), 0, 0, t, currrentVars, updatedVars, u, e);
#elif defined EULER_STEP_SPLIT_1D
hipLaunchKernelGGL(( eulerStepKernelSource_1D), dim3(grid_1D), dim3(block_1D), 0, 0, t, currrentVars, updatedVars, e, p, u, up);
hipLaunchKernelGGL(( eulerStepKernelX_1D), dim3(gridX_1D), dim3(blockX_1D), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelY_1D), dim3(gridY_1D), dim3(blockY_1D), 0, 0, t, currrentVars, updatedVars, u, e);
hipLaunchKernelGGL(( eulerStepKernelZ_1D), dim3(gridZ_1D), dim3(blockZ_1D), 0, 0, t, currrentVars, updatedVars, u, e);
#endif
}
__global__
void convexCombinationEulerStepKernel(const CONSERVED_VARIABLES * const __restrict__ q, CONSERVED_VARIABLES * const __restrict__ Q) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
Q->ttt[s] += q->ttt[s];
Q->ttt[s] /= 2;
Q->ttx[s] += q->ttx[s];
Q->ttx[s] /= 2;
Q->tty[s] += q->tty[s];
Q->tty[s] /= 2;
Q->ttn[s] += q->ttn[s];
Q->ttn[s] /= 2;
#ifdef PIMUNU
Q->pitt[s] += q->pitt[s];
Q->pitt[s] /= 2;
Q->pitx[s] += q->pitx[s];
Q->pitx[s] /= 2;
Q->pity[s] += q->pity[s];
Q->pity[s] /= 2;
Q->pitn[s] += q->pitn[s];
Q->pitn[s] /= 2;
Q->pixx[s] += q->pixx[s];
Q->pixx[s] /= 2;
Q->pixy[s] += q->pixy[s];
Q->pixy[s] /= 2;
Q->pixn[s] += q->pixn[s];
Q->pixn[s] /= 2;
Q->piyy[s] += q->piyy[s];
Q->piyy[s] /= 2;
Q->piyn[s] += q->piyn[s];
Q->piyn[s] /= 2;
Q->pinn[s] += q->pinn[s];
Q->pinn[s] /= 2;
#endif
#ifdef PI
Q->Pi[s] += q->Pi[s];
Q->Pi[s] /= 2;
#endif
}
}
#ifndef IDEAL
#define REGULATE_DISSIPATIVE_CURRENTS
#endif
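/* The predictor-corrector below is Heun's two-stage (second-order) Runge-Kutta scheme,
 * assuming eulerStep applies one forward-Euler update q -> q + dt*L(t, q) with L the
 * discrete Kurganov-Tadmor right-hand side:
 *   predictor:  q*      = q^n + dt * L(t, q^n)
 *   corrector:  q^{n+1} = ( q^n + q* + dt * L(t + dt, q*) ) / 2
 * the convex-combination kernel above performs the final averaging with q^n. */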
void twoStepRungeKutta(PRECISION t, PRECISION dt, CONSERVED_VARIABLES * __restrict__ d_q, CONSERVED_VARIABLES * __restrict__ d_Q) {
//===================================================
// Predicted step
//===================================================
eulerStep(t, d_q, d_qS, d_e, d_p, d_u, d_up);
t += dt;
hipLaunchKernelGGL(( setInferredVariablesKernel), dim3(gridSizeInferredVars), dim3(blockSizeInferredVars), 0, 0, d_qS, d_e, d_p, d_uS, t);
#ifdef REGULATE_DISSIPATIVE_CURRENTS
hipLaunchKernelGGL(( regulateDissipativeCurrents), dim3(gridSizeReg), dim3(blockSizeReg), 0, 0, t, d_qS, d_e, d_p, d_uS, d_validityDomain);
#endif
setGhostCells(d_qS, d_e, d_p, d_uS);
//===================================================
// Corrected step
//===================================================
eulerStep(t, d_qS, d_Q, d_e, d_p, d_uS, d_u);
hipLaunchKernelGGL(( convexCombinationEulerStepKernel), dim3(gridSizeConvexComb), dim3(blockSizeConvexComb), 0, 0, d_q, d_Q);
swapFluidVelocity(&d_up, &d_u);
hipLaunchKernelGGL(( setInferredVariablesKernel), dim3(gridSizeInferredVars), dim3(blockSizeInferredVars), 0, 0, d_Q, d_e, d_p, d_u, t);
#ifdef REGULATE_DISSIPATIVE_CURRENTS
hipLaunchKernelGGL(( regulateDissipativeCurrents), dim3(gridSizeReg), dim3(blockSizeReg), 0, 0, t, d_Q, d_e, d_p, d_u, d_validityDomain);
#endif
setGhostCells(d_Q, d_e, d_p, d_u);
//#ifndef IDEAL
checkValidity(t, d_validityDomain, d_q, d_e, d_p, d_u, d_up);
//#endif
hipDeviceSynchronize();
}
|
2aca058da238b16e77d29daf206d51b22d07465e.cu
|
/*
* FullyDiscreteKurganovTadmorScheme.cu
*
* Created on: Oct 23, 2015
* Author: bazow
*/
#include <stdlib.h>
#include <stdio.h> // for printf
#include <cuda.h>
#include <cuda_runtime.h>
#include "edu/osu/rhic/trunk/hydro/FullyDiscreteKurganovTadmorScheme.cuh"
#include "edu/osu/rhic/harness/lattice/LatticeParameters.h"
#include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh"
#include "edu/osu/rhic/trunk/hydro/GhostCells.cuh"
#include "edu/osu/rhic/core/muscl/SemiDiscreteKurganovTadmorScheme.cuh"
#include "edu/osu/rhic/core/muscl/HalfSiteExtrapolation.cuh"
#include "edu/osu//rhic/trunk/hydro/FluxFunctions.cuh"
#include "edu/osu//rhic/trunk/hydro/SpectralRadius.cuh"
#include "edu/osu/rhic/trunk/hydro/SourceTerms.cuh"
#include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh"
#include "edu/osu/rhic/harness/init/CudaConfiguration.cuh"
#include "edu/osu/rhic/trunk/hydro/RegulateDissipativeCurrents.cuh"
#include "edu/osu/rhic/trunk/hydro/EulerStep.cuh"
#include "edu/osu/rhic/trunk/hydro/HydrodynamicValidity.cuh"
//#define EULER_STEP_FUSED
//#define EULER_STEP_FUSED_1D
//#define EULER_STEP_SPLIT
//#define EULER_STEP_SMEM
#define EULER_STEP_SPLIT_1D
void eulerStep(PRECISION t, const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p, const FLUID_VELOCITY * const __restrict__ u,
const FLUID_VELOCITY * const __restrict__ up) {
#if defined EULER_STEP_FUSED
eulerStepKernel<<<GF, BF>>>(t, currrentVars, updatedVars, e, p, u, up);
#elif defined EULER_STEP_FUSED_1D
eulerStepKernel_1D<<<grid_fused_1D, block_fused_1D>>>(t, currrentVars, updatedVars, e, p, u, up);
#elif defined EULER_STEP_SPLIT
eulerStepKernelSource<<<grid, block>>>(t, currrentVars, updatedVars, e, p, u, up);
eulerStepKernelX<<<grid_X, block_X>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelY<<<grid_Y, block_Y>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelZ<<<grid_Z, block_Z>>>(t, currrentVars, updatedVars, u, e);
#elif defined EULER_STEP_SMEM
eulerStepKernelSource<<<grid, block>>>(t, currrentVars, updatedVars, e, p, u, up);
eulerStepKernelSharedX<<<GSX, BSX>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelSharedY<<<GSY, BSY>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelSharedZ<<<GSZ, BSZ>>>(t, currrentVars, updatedVars, u, e);
#elif defined EULER_STEP_SPLIT_1D
eulerStepKernelSource_1D<<<grid_1D, block_1D>>>(t, currrentVars, updatedVars, e, p, u, up);
eulerStepKernelX_1D<<<gridX_1D, blockX_1D>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelY_1D<<<gridY_1D, blockY_1D>>>(t, currrentVars, updatedVars, u, e);
eulerStepKernelZ_1D<<<gridZ_1D, blockZ_1D>>>(t, currrentVars, updatedVars, u, e);
#endif
}
__global__
void convexCombinationEulerStepKernel(const CONSERVED_VARIABLES * const __restrict__ q, CONSERVED_VARIABLES * const __restrict__ Q) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
Q->ttt[s] += q->ttt[s];
Q->ttt[s] /= 2;
Q->ttx[s] += q->ttx[s];
Q->ttx[s] /= 2;
Q->tty[s] += q->tty[s];
Q->tty[s] /= 2;
Q->ttn[s] += q->ttn[s];
Q->ttn[s] /= 2;
#ifdef PIMUNU
Q->pitt[s] += q->pitt[s];
Q->pitt[s] /= 2;
Q->pitx[s] += q->pitx[s];
Q->pitx[s] /= 2;
Q->pity[s] += q->pity[s];
Q->pity[s] /= 2;
Q->pitn[s] += q->pitn[s];
Q->pitn[s] /= 2;
Q->pixx[s] += q->pixx[s];
Q->pixx[s] /= 2;
Q->pixy[s] += q->pixy[s];
Q->pixy[s] /= 2;
Q->pixn[s] += q->pixn[s];
Q->pixn[s] /= 2;
Q->piyy[s] += q->piyy[s];
Q->piyy[s] /= 2;
Q->piyn[s] += q->piyn[s];
Q->piyn[s] /= 2;
Q->pinn[s] += q->pinn[s];
Q->pinn[s] /= 2;
#endif
#ifdef PI
Q->Pi[s] += q->Pi[s];
Q->Pi[s] /= 2;
#endif
}
}
#ifndef IDEAL
#define REGULATE_DISSIPATIVE_CURRENTS
#endif
void twoStepRungeKutta(PRECISION t, PRECISION dt, CONSERVED_VARIABLES * __restrict__ d_q, CONSERVED_VARIABLES * __restrict__ d_Q) {
//===================================================
// Predicted step
//===================================================
eulerStep(t, d_q, d_qS, d_e, d_p, d_u, d_up);
t += dt;
setInferredVariablesKernel<<<gridSizeInferredVars, blockSizeInferredVars>>>(d_qS, d_e, d_p, d_uS, t);
#ifdef REGULATE_DISSIPATIVE_CURRENTS
regulateDissipativeCurrents<<<gridSizeReg, blockSizeReg>>>(t, d_qS, d_e, d_p, d_uS, d_validityDomain);
#endif
setGhostCells(d_qS, d_e, d_p, d_uS);
//===================================================
// Corrected step
//===================================================
eulerStep(t, d_qS, d_Q, d_e, d_p, d_uS, d_u);
convexCombinationEulerStepKernel<<<gridSizeConvexComb, blockSizeConvexComb>>>(d_q, d_Q);
swapFluidVelocity(&d_up, &d_u);
setInferredVariablesKernel<<<gridSizeInferredVars, blockSizeInferredVars>>>(d_Q, d_e, d_p, d_u, t);
#ifdef REGULATE_DISSIPATIVE_CURRENTS
regulateDissipativeCurrents<<<gridSizeReg, blockSizeReg>>>(t, d_Q, d_e, d_p, d_u, d_validityDomain);
#endif
setGhostCells(d_Q, d_e, d_p, d_u);
//#ifndef IDEAL
checkValidity(t, d_validityDomain, d_q, d_e, d_p, d_u, d_up);
//#endif
cudaDeviceSynchronize();
}
|
73fc435300a6d895b5defa0c464a6cc8e48507bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<iostream>
#include<cstdlib>
#define BLOCKSIZE 256
// __global__
// void dot(float* X, float* Y, float* c){
// // int index = blockIdx.x * blockDim.x + threadIdx.x;
// // __shared__ float cache[BLOCKSIZE];
// //
// // cache[threadIdx.x] = X[index] * Y[index];
// //
// // __syncthreads();
// //
// // if(threadIdx.x == 0){
// // int sum = 0;
// // for(int i = 0; i < BLOCKSIZE; i++){
// // sum += cache[i];
// // }
// // atomicAdd(c, sum);
// // }
// }
//
__global__
void dot(float* X, float* Y, float* c, int n){
    __shared__ float cache[BLOCKSIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // each thread accumulates a private partial sum over its grid-stride range
    float temp = 0.0f;
    while(i < n){
        temp += X[i] * Y[i];
        i += gridDim.x * blockDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    // tree-reduce the block's partial sums all the way down to cache[0]
    i = BLOCKSIZE / 2;
    while(i > 0){
        if(threadIdx.x < i)
            cache[threadIdx.x] += cache[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }
    if (threadIdx.x == 0) atomicAdd(c, cache[0]);
}
__device__
float sigmoid(float x){
return 1.0f / (1 + exp(-x));
}
__global__
void sigPass(int n, float *X, float *S){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = idx; i<n; i += stride){
S[i] = sigmoid(X[i]);
}
}
__global__ void sigBackProp(int n, float *X, float *errors, float *out_slopes){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+= stride){
out_slopes[i] = errors[i] * sigmoid(X[i]) * (1 - sigmoid(X[i]));
}
}
int main(void){
int xdim = 1<<16;
int ydim = 1;
int n = xdim * ydim;
float *X, *Y, *S, *c;
hipMallocManaged(&X, n*sizeof(float));
hipMallocManaged(&S, sizeof(float));
hipMallocManaged(&Y, n*sizeof(float));
hipMallocManaged(&c, sizeof(float));
for (int i = 0; i < n; i++){
X[i] = (rand() %100)/10000.f;
Y[i] = (rand() %100)/10000.f;
}
int numBlocks = (n + BLOCKSIZE - 1) / BLOCKSIZE;
*c = 0.0f; // dot() accumulates into *c with atomicAdd, so it must start from zero
hipLaunchKernelGGL(( dot), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, X,Y,c,n);
hipDeviceSynchronize();
printf("dot output: %f\n", *c);
hipFree(X);
hipFree(Y);
hipLaunchKernelGGL(( sigPass), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, 1,c,S);
hipDeviceSynchronize();
printf("final output: %f\n", *S);
hipFree(c);
hipFree(S);
}
|
73fc435300a6d895b5defa0c464a6cc8e48507bb.cu
|
#include<math.h>
#include<iostream>
#include<cstdlib>
#define BLOCKSIZE 256
// __global__
// void dot(float* X, float* Y, float* c){
// // int index = blockIdx.x * blockDim.x + threadIdx.x;
// // __shared__ float cache[BLOCKSIZE];
// //
// // cache[threadIdx.x] = X[index] * Y[index];
// //
// // __syncthreads();
// //
// // if(threadIdx.x == 0){
// // int sum = 0;
// // for(int i = 0; i < BLOCKSIZE; i++){
// // sum += cache[i];
// // }
// // atomicAdd(c, sum);
// // }
// }
//
__global__
void dot(float* X, float* Y, float* c, int n){
    __shared__ float cache[BLOCKSIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // each thread accumulates a private partial sum over its grid-stride range
    float temp = 0.0f;
    while(i < n){
        temp += X[i] * Y[i];
        i += gridDim.x * blockDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    // tree-reduce the block's partial sums all the way down to cache[0]
    i = BLOCKSIZE / 2;
    while(i > 0){
        if(threadIdx.x < i)
            cache[threadIdx.x] += cache[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }
    if (threadIdx.x == 0) atomicAdd(c, cache[0]);
}
__device__
float sigmoid(float x){
return 1.0f / (1 + exp(-x));
}
__global__
void sigPass(int n, float *X, float *S){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = idx; i<n; i += stride){
S[i] = sigmoid(X[i]);
}
}
__global__ void sigBackProp(int n, float *X, float *errors, float *out_slopes){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+= stride){
out_slopes[i] = errors[i] * sigmoid(X[i]) * (1 - sigmoid(X[i]));
}
}
int main(void){
int xdim = 1<<16;
int ydim = 1;
int n = xdim * ydim;
float *X, *Y, *S, *c;
cudaMallocManaged(&X, n*sizeof(float));
cudaMallocManaged(&S, sizeof(float));
cudaMallocManaged(&Y, n*sizeof(float));
cudaMallocManaged(&c, sizeof(float));
for (int i = 0; i < n; i++){
X[i] = (rand() %100)/10000.f;
Y[i] = (rand() %100)/10000.f;
}
int numBlocks = (n + BLOCKSIZE - 1) / BLOCKSIZE;
*c = 0.0f; // dot() accumulates into *c with atomicAdd, so it must start from zero
dot<<<numBlocks, BLOCKSIZE>>>(X,Y,c,n);
cudaDeviceSynchronize();
printf("dot output: %f\n", *c);
cudaFree(X);
cudaFree(Y);
sigPass<<<numBlocks, BLOCKSIZE>>>(1,c,S);
cudaDeviceSynchronize();
printf("final output: %f\n", *S);
cudaFree(c);
cudaFree(S);
}
|
d64d7afbb9cc47a8884b2c8a95e46f103a11d462.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_residual_kernel;
int xdim0_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_residual_kernel;
int xdim1_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_residual_kernel;
int xdim2_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_residual_kernel;
int xdim3_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim4_tea_leaf_common_residual_kernel;
int xdim4_tea_leaf_common_residual_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC3(x, y) (x + xdim3_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC4(x, y) (x + xdim4_tea_leaf_common_residual_kernel * (y))
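/* OPS_ACCn(x, y) flattens a 2-D stencil offset into a 1-D index: column offset x plus row
 * offset y times the row pitch xdimn of dat n. The wrapper kernel below advances each arg
 * pointer to the current grid point first, so (0, 0) is the point being updated. */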
// user function
__device__
void
tea_leaf_common_residual_kernel_gpu(double *r, const double *Kx,
const double *Ky, const double *u,
const double *u0, const double *rx,
const double *ry) {
double smvp = 0.0;
smvp = (1.0 + (*ry) * (Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0, 0)]) +
(*rx) * (Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0, 0)])) *
u[OPS_ACC3(0, 0)] -
(*ry) * (Ky[OPS_ACC2(0, 1)] * u[OPS_ACC3(0, 1)] +
Ky[OPS_ACC2(0, 0)] * u[OPS_ACC3(0, -1)]) -
(*rx) * (Kx[OPS_ACC1(1, 0)] * u[OPS_ACC3(1, 0)] +
Kx[OPS_ACC1(0, 0)] * u[OPS_ACC3(-1, 0)]);
r[OPS_ACC0(0, 0)] = u0[OPS_ACC4(0, 0)] - smvp;
}
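/* The user function above computes the residual r = u0 - A*u of the TeaLeaf linear system,
 * where A*u (smvp) is the symmetric five-point diffusion stencil built from the face
 * coefficients Kx, Ky and the scalars rx, ry. */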
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_tea_leaf_common_residual_kernel(
double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, const double arg5, const double arg6,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_common_residual_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_common_residual_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_common_residual_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_common_residual_kernel;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_tea_leaf_common_residual_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_common_residual_kernel_gpu(arg0, arg1, arg2, arg3, arg4, &arg5,
&arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_common_residual_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_tea_leaf_common_residual_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 7, range, 38))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(38, "tea_leaf_common_residual_kernel");
OPS_kernels[38].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
if (xdim0 != xdim0_tea_leaf_common_residual_kernel_h ||
xdim1 != xdim1_tea_leaf_common_residual_kernel_h ||
xdim2 != xdim2_tea_leaf_common_residual_kernel_h ||
xdim3 != xdim3_tea_leaf_common_residual_kernel_h ||
xdim4 != xdim4_tea_leaf_common_residual_kernel_h) {
hipMemcpyToSymbol(xdim0_tea_leaf_common_residual_kernel, &xdim0,
sizeof(int));
xdim0_tea_leaf_common_residual_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_tea_leaf_common_residual_kernel, &xdim1,
sizeof(int));
xdim1_tea_leaf_common_residual_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_tea_leaf_common_residual_kernel, &xdim2,
sizeof(int));
xdim2_tea_leaf_common_residual_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_tea_leaf_common_residual_kernel, &xdim3,
sizeof(int));
xdim3_tea_leaf_common_residual_kernel_h = xdim3;
hipMemcpyToSymbol(xdim4_tea_leaf_common_residual_kernel, &xdim4,
sizeof(int));
xdim4_tea_leaf_common_residual_kernel_h = xdim4;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[7];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_common_residual_kernel), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], *(double *)arg5.data, *(double *)arg6.data, x_size,
y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[38].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_common_residual_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 38;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 38;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
char *tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg5.data, 1 * sizeof(double));
desc->args[5].data = tmp;
desc->args[6] = arg6;
tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg6.data, 1 * sizeof(double));
desc->args[6].data = tmp;
desc->function = ops_par_loop_tea_leaf_common_residual_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(38, "tea_leaf_common_residual_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
d64d7afbb9cc47a8884b2c8a95e46f103a11d462.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_residual_kernel;
int xdim0_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_residual_kernel;
int xdim1_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_residual_kernel;
int xdim2_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_residual_kernel;
int xdim3_tea_leaf_common_residual_kernel_h = -1;
__constant__ int xdim4_tea_leaf_common_residual_kernel;
int xdim4_tea_leaf_common_residual_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC3(x, y) (x + xdim3_tea_leaf_common_residual_kernel * (y))
#define OPS_ACC4(x, y) (x + xdim4_tea_leaf_common_residual_kernel * (y))
// user function
__device__
void
tea_leaf_common_residual_kernel_gpu(double *r, const double *Kx,
const double *Ky, const double *u,
const double *u0, const double *rx,
const double *ry) {
double smvp = 0.0;
smvp = (1.0 + (*ry) * (Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0, 0)]) +
(*rx) * (Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0, 0)])) *
u[OPS_ACC3(0, 0)] -
(*ry) * (Ky[OPS_ACC2(0, 1)] * u[OPS_ACC3(0, 1)] +
Ky[OPS_ACC2(0, 0)] * u[OPS_ACC3(0, -1)]) -
(*rx) * (Kx[OPS_ACC1(1, 0)] * u[OPS_ACC3(1, 0)] +
Kx[OPS_ACC1(0, 0)] * u[OPS_ACC3(-1, 0)]);
r[OPS_ACC0(0, 0)] = u0[OPS_ACC4(0, 0)] - smvp;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_tea_leaf_common_residual_kernel(
double *__restrict arg0, const double *__restrict arg1,
const double *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, const double arg5, const double arg6,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_common_residual_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_common_residual_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_common_residual_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_common_residual_kernel;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_tea_leaf_common_residual_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_common_residual_kernel_gpu(arg0, arg1, arg2, arg3, arg4, &arg5,
&arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_common_residual_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_tea_leaf_common_residual_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 7, range, 38))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(38, "tea_leaf_common_residual_kernel");
OPS_kernels[38].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
if (xdim0 != xdim0_tea_leaf_common_residual_kernel_h ||
xdim1 != xdim1_tea_leaf_common_residual_kernel_h ||
xdim2 != xdim2_tea_leaf_common_residual_kernel_h ||
xdim3 != xdim3_tea_leaf_common_residual_kernel_h ||
xdim4 != xdim4_tea_leaf_common_residual_kernel_h) {
cudaMemcpyToSymbol(xdim0_tea_leaf_common_residual_kernel, &xdim0,
sizeof(int));
xdim0_tea_leaf_common_residual_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_tea_leaf_common_residual_kernel, &xdim1,
sizeof(int));
xdim1_tea_leaf_common_residual_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_tea_leaf_common_residual_kernel, &xdim2,
sizeof(int));
xdim2_tea_leaf_common_residual_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_tea_leaf_common_residual_kernel, &xdim3,
sizeof(int));
xdim3_tea_leaf_common_residual_kernel_h = xdim3;
cudaMemcpyToSymbol(xdim4_tea_leaf_common_residual_kernel, &xdim4,
sizeof(int));
xdim4_tea_leaf_common_residual_kernel_h = xdim4;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[7];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_tea_leaf_common_residual_kernel<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], *(double *)arg5.data, *(double *)arg6.data, x_size,
y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[38].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[38].mpi_time += t2 - t1;
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_common_residual_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 38;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 38;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
char *tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg5.data, 1 * sizeof(double));
desc->args[5].data = tmp;
desc->args[6] = arg6;
tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg6.data, 1 * sizeof(double));
desc->args[6].data = tmp;
desc->function = ops_par_loop_tea_leaf_common_residual_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(38, "tea_leaf_common_residual_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
6cee481e870b30a6a4cfae3bf55406b4cda48775.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is no longer used
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
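/* Reading of the switch above: the key is act * 10 + grad. act == 1 selects the identity
 * (linear) path and act == 3 leaky ReLU with negative slope alpha; grad == 0 evaluates the
 * forward activation, grad == 1 its first derivative applied to the incoming gradient x
 * (ref holds the forward-pass input), and grad == 2 the second derivative, which is zero
 * for both activations. */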
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
hipLaunchKernelGGL(( fused_bias_act_kernel<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
}
|
6cee481e870b30a6a4cfae3bf55406b4cda48775.cu
|
// This file is no longer used
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
}
|
c6d78c4465c428660307eb7fea50796caed08e95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdlib.h"
#include <cmath>
using namespace std;
#define ERROR_HANDLING(call) do { \
hipError_t error = call; \
if(error != hipSuccess) { \
fprintf(stderr, "ERROR: in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, hipGetErrorString(error)); \
exit(1); \
} \
} while (0)
texture<uchar4, 2, hipReadModeElementType> texRef2D;
__device__
double getBrightnessFromRGB(uchar4 p) {
return 0.299*p.x + 0.587*p.y + 0.114*p.z;
}
__device__
int convolution(double window[3][3]) {
double Gx = window[0][2] - window[0][0] + window[1][2] - window[1][0] + window[2][2] - window[2][0];
double Gy = window[2][0] - window[0][0] + window[2][1] - window[0][1] + window[2][2] - window[0][2];
double eps = 1e-10;
double G = sqrt(Gx*Gx+Gy*Gy);
int res = abs(((int)G+1.0)-G)<eps ? (int)G+1 : (int)G;
if (res < 0)
res = 0;
else if (res > 255)
res = 255;
return res;
}
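/* convolution() above applies the 3x3 Prewitt operator to the brightness window w:
 *   Gx = (w[0][2] + w[1][2] + w[2][2]) - (w[0][0] + w[1][0] + w[2][0])   right column minus left
 *   Gy = (w[2][0] + w[2][1] + w[2][2]) - (w[0][0] + w[0][1] + w[0][2])   bottom row minus top
 * and the gradient magnitude sqrt(Gx^2 + Gy^2) is converted to an integer and clamped to [0, 255]. */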
__global__
void PrewittKernel(uchar4 *device_data, int width, int height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int x, y;
const int windowWidth = 3;
const int windowHeight = 3;
for(x = idx; x < width; x += offsetx)
for(y = idy; y < height; y += offsety) {
uchar4 windowPoints[windowHeight][windowWidth];
uchar4 centerPoint = tex2D(texRef2D, x, y);
windowPoints[0][0] = tex2D(texRef2D, x-1, y-1);
windowPoints[0][1] = tex2D(texRef2D, x, y-1);
windowPoints[0][2] = tex2D(texRef2D, x+1, y-1);
windowPoints[1][0] = tex2D(texRef2D, x-1, y);
windowPoints[1][1] = centerPoint;
windowPoints[1][2] = tex2D(texRef2D, x+1, y);
windowPoints[2][0] = tex2D(texRef2D, x-1, y+1);
windowPoints[2][1] = tex2D(texRef2D, x, y+1);
windowPoints[2][2] = tex2D(texRef2D, x+1, y+1);
double windowBrightnesses[windowWidth][windowHeight];
for (int i = 0; i < windowHeight; i++)
for (int j = 0; j < windowWidth; j++) {
windowBrightnesses[i][j] = getBrightnessFromRGB(windowPoints[i][j]);
}
int resBrightness = convolution(windowBrightnesses);
uchar4 resPoint = make_uchar4(resBrightness, resBrightness, resBrightness, centerPoint.w);
device_data[y * width + x] = resPoint;
}
}
int main(int argc, char *argv[]) {
int width, height;
char inputFilename[256];
char outputFilename[256];
scanf("%s", inputFilename);
scanf("%s", outputFilename);
FILE* file = fopen(inputFilename, "rb");
fread(&width, sizeof(int), 1, file);
fread(&height, sizeof(int), 1, file);
uchar4* io_data = (uchar4*)malloc(sizeof(uchar4)*height*width);
fread(io_data, sizeof(uchar4), width*height, file);
fclose(file);
hipArray *c_arr;
hipChannelFormatDesc ch = hipCreateChannelDesc<uchar4>();
ERROR_HANDLING(hipMallocArray(&c_arr, &ch, width, height));
ERROR_HANDLING(hipMemcpyToArray(c_arr, 0, 0, io_data, sizeof(uchar4) * width * height, hipMemcpyHostToDevice));
texRef2D.addressMode[0] = hipAddressModeClamp;
texRef2D.addressMode[1] = hipAddressModeClamp;
texRef2D.channelDesc = ch;
texRef2D.filterMode = hipFilterModePoint;
texRef2D.normalized = false;
ERROR_HANDLING(hipBindTextureToArray(texRef2D, c_arr, ch));
uchar4 *dev_data;
ERROR_HANDLING(hipMalloc(&dev_data, sizeof(uchar4) * width * height));
hipLaunchKernelGGL(( PrewittKernel), dim3(dim3(32, 32)), dim3(dim3(16, 16)), 0, 0, dev_data, width, height);
ERROR_HANDLING(hipGetLastError());
ERROR_HANDLING(hipMemcpy(io_data, dev_data, sizeof(uchar4) * width * height, hipMemcpyDeviceToHost));
ERROR_HANDLING(hipUnbindTexture(texRef2D));
ERROR_HANDLING(hipFreeArray(c_arr));
ERROR_HANDLING(hipFree(dev_data));
file = fopen(outputFilename, "wb");
fwrite(&width, sizeof(int), 1, file);
fwrite(&height, sizeof(int), 1, file);
fwrite(io_data, sizeof(uchar4), width * height, file);
fclose(file);
free(io_data);
return 0;
}
|
c6d78c4465c428660307eb7fea50796caed08e95.cu
|
#include "stdio.h"
#include "stdlib.h"
#include <cmath>
using namespace std;
#define ERROR_HANDLING(call) do { \
cudaError error = call; \
if(error != cudaSuccess) { \
fprintf(stderr, "ERROR: in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(error)); \
exit(1); \
} \
} while (0)
texture<uchar4, 2, cudaReadModeElementType> texRef2D;
__device__
double getBrightnessFromRGB(uchar4 p) {
return 0.299*p.x + 0.587*p.y + 0.114*p.z;
}
__device__
int convolution(double window[3][3]) {
double Gx = window[0][2] - window[0][0] + window[1][2] - window[1][0] + window[2][2] - window[2][0];
double Gy = window[2][0] - window[0][0] + window[2][1] - window[0][1] + window[2][2] - window[0][2];
double eps = 1e-10;
double G = sqrt(Gx*Gx+Gy*Gy);
int res = abs(((int)G+1.0)-G)<eps ? (int)G+1 : (int)G;
if (res < 0)
res = 0;
else if (res > 255)
res = 255;
return res;
}
__global__
void PrewittKernel(uchar4 *device_data, int width, int height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int x, y;
const int windowWidth = 3;
const int windowHeight = 3;
for(x = idx; x < width; x += offsetx)
for(y = idy; y < height; y += offsety) {
uchar4 windowPoints[windowHeight][windowWidth];
uchar4 centerPoint = tex2D(texRef2D, x, y);
windowPoints[0][0] = tex2D(texRef2D, x-1, y-1);
windowPoints[0][1] = tex2D(texRef2D, x, y-1);
windowPoints[0][2] = tex2D(texRef2D, x+1, y-1);
windowPoints[1][0] = tex2D(texRef2D, x-1, y);
windowPoints[1][1] = centerPoint;
windowPoints[1][2] = tex2D(texRef2D, x+1, y);
windowPoints[2][0] = tex2D(texRef2D, x-1, y+1);
windowPoints[2][1] = tex2D(texRef2D, x, y+1);
windowPoints[2][2] = tex2D(texRef2D, x+1, y+1);
double windowBrightnesses[windowWidth][windowHeight];
for (int i = 0; i < windowHeight; i++)
for (int j = 0; j < windowWidth; j++) {
windowBrightnesses[i][j] = getBrightnessFromRGB(windowPoints[i][j]);
}
int resBrightness = convolution(windowBrightnesses);
uchar4 resPoint = make_uchar4(resBrightness, resBrightness, resBrightness, centerPoint.w);
device_data[y * width + x] = resPoint;
}
}
int main(int argc, char *argv[]) {
int width, height;
char inputFilename[256];
char outputFilename[256];
scanf("%s", inputFilename);
scanf("%s", outputFilename);
FILE* file = fopen(inputFilename, "rb");
fread(&width, sizeof(int), 1, file);
fread(&height, sizeof(int), 1, file);
uchar4* io_data = (uchar4*)malloc(sizeof(uchar4)*height*width);
fread(io_data, sizeof(uchar4), width*height, file);
fclose(file);
cudaArray *c_arr;
cudaChannelFormatDesc ch = cudaCreateChannelDesc<uchar4>();
ERROR_HANDLING(cudaMallocArray(&c_arr, &ch, width, height));
ERROR_HANDLING(cudaMemcpyToArray(c_arr, 0, 0, io_data, sizeof(uchar4) * width * height, cudaMemcpyHostToDevice));
texRef2D.addressMode[0] = cudaAddressModeClamp;
texRef2D.addressMode[1] = cudaAddressModeClamp;
texRef2D.channelDesc = ch;
texRef2D.filterMode = cudaFilterModePoint;
texRef2D.normalized = false;
ERROR_HANDLING(cudaBindTextureToArray(texRef2D, c_arr, ch));
uchar4 *dev_data;
ERROR_HANDLING(cudaMalloc(&dev_data, sizeof(uchar4) * width * height));
PrewittKernel<<<dim3(32, 32), dim3(16, 16)>>>(dev_data, width, height);
ERROR_HANDLING(cudaGetLastError());
ERROR_HANDLING(cudaMemcpy(io_data, dev_data, sizeof(uchar4) * width * height, cudaMemcpyDeviceToHost));
ERROR_HANDLING(cudaUnbindTexture(texRef2D));
ERROR_HANDLING(cudaFreeArray(c_arr));
ERROR_HANDLING(cudaFree(dev_data));
file = fopen(outputFilename, "wb");
fwrite(&width, sizeof(int), 1, file);
fwrite(&height, sizeof(int), 1, file);
fwrite(io_data, sizeof(uchar4), width * height, file);
fclose(file);
free(io_data);
return 0;
}
|
c3fe53dda7500895ddf7e9c3397112b780306042.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// implementation of operations for Simple Recurrent Unit: arXiv:1709.02755v2 [cs.CL] 12 Sep 2017
//
// @author Yurii Shyrma, created on 05.12.2017
//
#include<ops/declarable/helpers/sru.h>
#include <array/NDArrayFactory.h>
#include <helpers/PointersManager.h>
#include <helpers/MmulHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray activation(const NDArray& arr) {
// return (const_cast<NDArray<T>&>(arr)).template transform<simdOps::Tanh<T>>();
auto result = NDArray(&arr, false, arr.getContext());
(const_cast<NDArray&>(arr)).applyTransform(transform::Tanh, result);
return result;
}
//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray sigmoid(const NDArray& arr) {
return (const_cast<NDArray&>(arr)).transform(transform::Sigmoid);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void sruCell(sd::LaunchContext * context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {
// x input [bS x inSize], bS - batch size, inSize - number of features
// c0 previous cell state c [bS x inSize], that is at previous time step t-1
// w weights [inSize x 3*inSize]
// b biases [2*inSize]
// h current cell output [bS x inSize], that is at current time step t
// c current cell state [bS x inSize], that is at current time step t
const int inSize = x->sizeAt(1); // inSize - number of features
auto z = mmul(*x, *w); // [bS x 3*inSize]
// forget gate = sigmoid(x*Wf + bf)
auto f = sigmoid(z({0,0, inSize, 2*inSize}) + (*b)({0, inSize}));
// reset gate = sigmoid(x*Wr + br)
auto r = sigmoid(z({0,0, 2*inSize, 3*inSize}) + (*b)({inSize, 2*inSize}));
// ◦ means element-wise product or so called Hadamard product
// current cell state = f◦c0 + (1 - f)◦(x*Wc)
c->assign(f * (*c0) + (1.f - f) * z({0, 0 ,0, inSize}) );
// *c = f*(*c0 - z({},{0, inSize})) + z({{},{0, inSize}});
// current cell output = r◦activation(c) + (1 - r)◦x
h->assign( r * activation(*c) + (1.f - r) * (*x) );
// *h = r * (activation<T>(c) - *x) + *x;
}
//////////////////////////////////////////////////////////////////////////
void sruTimeLoop(sd::LaunchContext * context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {
// x input [bS x inSize x time]
// c0 initial cell state (at time step = 0) [bS x inSize],
// w weights, [3*inSize x inSize]
// b biases, [2*inSize]
// h cell outputs [bS x inSize x time]
// c cell states [bS x inSize x time]
auto wT = w->transpose(); // [3*inSize x inSize] -> [inSize x 3*inSize]
const int time = x->sizeAt(2);
NDArray ct_1(*c0);
// loop through time steps
for (int t = 0; t < time; ++t) {
auto xt = (*x)({0,0, 0,0, t,t+1});
auto ht = (*h)({0,0, 0,0, t,t+1});
auto ct = (*c)({0,0, 0,0, t,t+1});
helpers::sruCell(context, &xt, &ct_1, &wT, b, &ht, &ct);
ct_1.assign(ct);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBICuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
void* vht, const Nd4jLong* htShapeInfo,
void* vct, const Nd4jLong* ctShapeInfo) {
// inputs:
// x [time, bS, 2*K]
// wi [time, bS, 6*K], wi = mmul(x, weights);
// b [4*K]
// c0 [bS, 2*K]
// mask [bS, 2*K], optional
// outputs
// ht [time, bS, 2*K]
// ct [time, bS, 2*K]
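// Each thread owns one (batch, feature) pair of the 2*K features: indices < K run the
// forward direction (t = 0 .. time-1), indices >= K run the backward direction
// (t = time-1 .. 0), selected below via the `flip` flag.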
const auto x = reinterpret_cast<const T*>(vx);
const auto wi = reinterpret_cast<const T*>(vwi);
const auto b = reinterpret_cast<const T*>(vb);
const auto c0 = reinterpret_cast<const T*>(vc0);
const auto mask = reinterpret_cast<const T*>(vmask);
auto ht = reinterpret_cast<T*>(vht);
auto ct = reinterpret_cast<T*>(vct);
const int rank = 3;
__shared__ int time, K, *sharedMem;
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
time = xShapeInfo[1];
K = xShapeInfo[3] / 2;
len = xShapeInfo[2] * xShapeInfo[3]; // 2*K*bS
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto coords = sharedMem + threadIdx.x * rank;
if(tid >= len)
return;
shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1); // loop through last two dimensions of x : {bS, 2*K}
const auto maskOffst = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
const auto c0Offset = shape::getOffset(c0ShapeInfo, coords + 1);
const auto bFOffset = shape::getOffset(bShapeInfo, coords + 2);
const auto bROffset = bFOffset + 2 * K * bShapeInfo[2]; // 2*K*b_stride
const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
const T bF = b[bFOffset];
const T bR = b[bROffset];
T c0Val = c0[c0Offset];
const bool flip = coords[2] >= K;
if(flip)
coords[0] = time - 1;
else
coords[0] = 0;
auto xOffset = shape::getOffset(xShapeInfo, coords);
auto htOffset = shape::getOffset(htShapeInfo, coords);
auto ctOffset = shape::getOffset(ctShapeInfo, coords);
coords[2] *= 3;
auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3]; // add last stride
auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3]; // add last stride
// time loop
for (uint t = 0; t < time; ++t) {
// evaluate sigmoids
T ft = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
T rt = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));
c0Val = (c0Val - wi[wiOffset0]) * ft + wi[wiOffset0];
ct[ctOffset] = c0Val;
T val = sd::math::nd4j_tanh<T, T>(c0Val);
T xVal = x[xOffset];
ht[htOffset] = (val * maskVal - xVal) * rt + xVal;
if(flip) {
xOffset -= xShapeInfo[rank + 1]; // first stride, corresponds to time step
htOffset -= htShapeInfo[rank + 1];
ctOffset -= htShapeInfo[rank + 1];
wiOffset0 -= wiShapeInfo[rank + 1];
wiOffset1 -= wiShapeInfo[rank + 1];
wiOffset2 -= wiShapeInfo[rank + 1];
}
else {
xOffset += xShapeInfo[rank + 1]; // first stride, corresponds to time step
htOffset += htShapeInfo[rank + 1];
ctOffset += htShapeInfo[rank + 1];
wiOffset0 += wiShapeInfo[rank + 1];
wiOffset1 += wiShapeInfo[rank + 1];
wiOffset2 += wiShapeInfo[rank + 1];
}
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBICudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
void* vht, const Nd4jLong* htShapeInfo,
void* vct, const Nd4jLong* ctShapeInfo) {
hipLaunchKernelGGL(( sruBICuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vht, htShapeInfo, vct, ctShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void sruBI(sd::LaunchContext * context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* mask, NDArray* ht, NDArray* ct) {
// x = x * mask
if(mask)
x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x); // apply mask
// U = x * w
NDArray wi = mmul(*x, *w); // U [time x bS x 6*K]
PointersManager manager(context, "sru_bi");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock; // loop through last two dimensions of x array -> bS, 2*K
const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;
NDArray::prepareSpecialUse({ht, ct}, {x, &wi, b, c0, mask});
BUILD_SINGLE_SELECTOR(x->dataType(), sruBICudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ht->specialBuffer(), ht->specialShapeInfo(), ct->specialBuffer(), ct->specialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({ht, ct}, {x, &wi, b, c0, mask});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBIBPCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
const void* vct, const Nd4jLong* ctShapeInfo,
const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
void* vgradI, const Nd4jLong* gradIShapeInfo,
void* vgradWi, const Nd4jLong* gradWiShapeInfo,
void* vgradB, const Nd4jLong* gradBShapeInfo,
void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {
// inputs:
// x [time, bS, 2*K]
// wi [time, bS, 6*K], wi = mmul(x, weights);
// b [4*K]
// c0 [bS, 2*K]
// mask [bS, 2*K], optional
// ct [time, bS, 2*K]
// gradHt [time, bS, 2*K]
// gradCt [bS, 2*K]
// outputs
// gradI [time, bS, 2*K]
// gradWi [time, 2*K, 6*K]
// gradB [bS, 4*K]
// gradC0 [bS, 2*K]
const auto x = reinterpret_cast<const T*>(vx);
const auto wi = reinterpret_cast<const T*>(vwi);
const auto b = reinterpret_cast<const T*>(vb);
const auto c0 = reinterpret_cast<const T*>(vc0);
const auto mask = reinterpret_cast<const T*>(vmask);
const auto ct = reinterpret_cast<const T*>(vct);
const auto gradHt = reinterpret_cast<const T*>(vgradHt);
const auto gradCt = reinterpret_cast<const T*>(vgradCt);
auto gradI = reinterpret_cast<T*>(vgradI);
auto gradWi = reinterpret_cast<T*>(vgradWi);
auto gradB = reinterpret_cast<T*>(vgradB);
auto gradC0 = reinterpret_cast<T*>(vgradC0);
const int rank = 3;
__shared__ int time, K, *sharedMem;
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
time = xShapeInfo[1];
K = xShapeInfo[3] / 2;
len = xShapeInfo[2] * xShapeInfo[3]; // 2*K*bS
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto coords = sharedMem + threadIdx.x * rank;
if(tid >= len)
return;
shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1); // loop through last two dimensions of x : {bS, 2*K}
const auto maskOffst = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
const auto c0Offset = shape::getOffset(c0ShapeInfo, coords + 1);
const auto gradCtOffset = shape::getOffset(gradCtShapeInfo, coords + 1);
const auto gradC0Offset = shape::getOffset(gradC0ShapeInfo, coords + 1);
const auto bFOffset = shape::getOffset(bShapeInfo, coords + 2);
const auto bROffset = bFOffset + 2 * K * bShapeInfo[2]; // 2*K*b_stride
// const auto gradBFOffset = shape::getOffset(gradBShapeInfo, coords + 1);
const auto gradBFOffset = coords[1] * gradBShapeInfo[3] / 2 + coords[2] * gradBShapeInfo[4];
const auto gradBROffset = gradBFOffset + gradBShapeInfo[3];
const bool flip = coords[2] >= K;
if(flip)
coords[0] = 0;
else
coords[0] = time - 1;
auto xOffset = shape::getOffset(xShapeInfo, coords);
auto ctOffset = shape::getOffset(ctShapeInfo, coords);
auto gradIOffset = shape::getOffset(gradIShapeInfo, coords);
auto gradHtOffset = shape::getOffset(gradHtShapeInfo, coords);
coords[2] *= 3;
auto gradWiOffset0 = shape::getOffset(gradWiShapeInfo, coords);
auto gradWiOffset1 = gradWiOffset0 + gradWiShapeInfo[rank + 3]; // add last stride
auto gradWiOffset2 = gradWiOffset1 + gradWiShapeInfo[rank + 3]; // add last stride
auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3]; // add last stride
auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3]; // add last stride
const T xVal = x[xOffset];
const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
const T c0Val = c0[c0Offset];
const T bF = b[bFOffset];
const T bR = b[bROffset];
T gradCtVal = gradCt[gradCtOffset];
T gbF = 0.f;
T gbR = 0.f;
// time loop
for (uint t = 0; t < time; ++t) {
// evaluate sigmoids
T ft = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
T rt = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));
T val = sd::math::nd4j_tanh<T,T>(ct[ctOffset]);
T prevVal;
if(t < time-1)
prevVal = ct[ctOffset += flip ? ctShapeInfo[rank + 1] : -ctShapeInfo[rank + 1]];
else
prevVal = c0Val;
// grad wrt input
gradI[gradIOffset] = gradHt[gradHtOffset] - gradHt[gradHtOffset] * rt ;
// grad wrt rt, wiR and bR
T grt = gradHt[gradHtOffset] * (val * maskVal - x[xOffset]) * (rt - rt * rt);
gradWi[gradWiOffset2] = grt;
gbR += grt;
// grad wrt state
T gradC0Val = gradHt[gradHtOffset] * maskVal * (rt - rt * val * val) + gradCtVal;
// grad wrt wi0
gradWi[gradWiOffset0] = gradC0Val - gradC0Val * ft;
// grad wrt ft, wi1, and bF
T gft = gradC0Val * (prevVal - wi[wiOffset0]) * (ft - ft * ft);
gradWi[gradWiOffset1] = gft;
gbF += gft;
// grad wrt c_previous
gradCtVal = gradC0Val * ft;
if(flip) {
xOffset += xShapeInfo[rank + 1]; // first stride, corresponds to time step
gradHtOffset += gradHtShapeInfo[rank + 1];
gradIOffset += gradIShapeInfo[rank + 1];
wiOffset0 += wiShapeInfo[rank + 1];
wiOffset1 += wiShapeInfo[rank + 1];
wiOffset2 += wiShapeInfo[rank + 1];
gradWiOffset0 += gradWiShapeInfo[rank + 1];
gradWiOffset1 += gradWiShapeInfo[rank + 1];
gradWiOffset2 += gradWiShapeInfo[rank + 1];
}
else {
xOffset -= xShapeInfo[rank + 1]; // first stride, corresponds to time step
gradHtOffset -= gradHtShapeInfo[rank + 1];
gradIOffset -= gradIShapeInfo[rank + 1];
wiOffset0 -= wiShapeInfo[rank + 1];
wiOffset1 -= wiShapeInfo[rank + 1];
wiOffset2 -= wiShapeInfo[rank + 1];
gradWiOffset0 -= gradWiShapeInfo[rank + 1];
gradWiOffset1 -= gradWiShapeInfo[rank + 1];
gradWiOffset2 -= gradWiShapeInfo[rank + 1];
}
}
gradB[gradBFOffset] = gbF;
gradB[gradBROffset] = gbR;
gradC0[gradC0Offset] = gradCtVal;
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBIBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
const void* vct, const Nd4jLong* ctShapeInfo,
const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
void* vgradI, const Nd4jLong* gradIShapeInfo,
void* vgradWi, const Nd4jLong* gradWiShapeInfo,
void* vgradB, const Nd4jLong* gradBShapeInfo,
void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {
hipLaunchKernelGGL(( sruBIBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vct, ctShapeInfo, vgradHt, gradHtShapeInfo, vgradCt, gradCtShapeInfo, vgradI, gradIShapeInfo, vgradWi, gradWiShapeInfo, vgradB, gradBShapeInfo, vgradC0, gradC0ShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void sruBIBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vwi, const Nd4jLong* wiShapeInfo, const void* vb, const Nd4jLong* bShapeInfo, const void* vc0, const Nd4jLong* c0ShapeInfo, const void* vmask, const Nd4jLong* maskShapeInfo, const void* vct, const Nd4jLong* ctShapeInfo, const void* vgradHt, const Nd4jLong* gradHtShapeInfo, const void* vgradCt, const Nd4jLong* gradCtShapeInfo, void* vgradI, const Nd4jLong* gradIShapeInfo, void* vgradWi, const Nd4jLong* gradWiShapeInfo, void* vgradB, const Nd4jLong* gradBShapeInfo, void* vgradC0, const Nd4jLong* gradC0ShapeInfo), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void sruBIBP(sd::LaunchContext* context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* ct,
const NDArray* gradCt, const NDArray* gradHt, const NDArray* mask,
NDArray* gradI, NDArray* gradW, NDArray* gradB, NDArray* gradC0) {
// x = x * mask
if(mask)
x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x); // apply mask
// U = x * w
NDArray wi = mmul(*x, *w); // U [time x bS x 6*K]
const int time = x->sizeAt(0);
const int bS = x->sizeAt(1);
const int K = x->sizeAt(2) / 2;
NDArray gradBias(x->ordering(), {bS, 4*K}, x->dataType(), context);
NDArray gradWi (x->ordering(), {time, bS, 6*K}, x->dataType(), context);
PointersManager manager(context, "sru_bi_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock; // loop through last two dimensions of x array -> bS, 2*K
const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;
NDArray::prepareSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});
BUILD_SINGLE_SELECTOR(x->dataType(), sruBIBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ct->specialBuffer(), ct->specialShapeInfo(), gradHt->specialBuffer(), gradHt->specialShapeInfo(), gradCt->specialBuffer(), gradCt->specialShapeInfo(), gradI->specialBuffer(), gradI->specialShapeInfo(), gradWi.specialBuffer(), gradWi.specialShapeInfo(), gradBias.specialBuffer(), gradBias.specialShapeInfo(), gradC0->specialBuffer(), gradC0->specialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});
manager.synchronize();
// gradB
gradBias.reduceAlongDimension(reduce::Sum, *gradB, {0}); // [4*K]
// gradW
x->permutei({0, 2, 1}); // [time, bS, 2*K] -> [time, 2*K, bS]
MmulHelper::mmul(x, &gradWi, gradW, 1., 0.); // [time, 2*K, bS] x [time, bS , 6*K] = [time, 2*K, 6*K]
}
}
}
}
|
c3fe53dda7500895ddf7e9c3397112b780306042.cu
|
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// implementation of operations for Simple Recurrent Unit: arXiv:1709.02755v2 [cs.CL] 12 Sep 2017
//
// @author Yurii Shyrma, created on 05.12.2017
//
#include<ops/declarable/helpers/sru.h>
#include <array/NDArrayFactory.h>
#include <helpers/PointersManager.h>
#include <helpers/MmulHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray activation(const NDArray& arr) {
// return (const_cast<NDArray<T>&>(arr)).template transform<simdOps::Tanh<T>>();
auto result = NDArray(&arr, false, arr.getContext());
(const_cast<NDArray&>(arr)).applyTransform(transform::Tanh, result);
return result;
}
//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray sigmoid(const NDArray& arr) {
return (const_cast<NDArray&>(arr)).transform(transform::Sigmoid);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void sruCell(sd::LaunchContext * context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {
// x input [bS x inSize], bS - batch size, inSize - number of features
// c0 previous cell state c [bS x inSize], that is at previous time step t-1
// w weights [inSize x 3*inSize]
// b biases [2*inSize]
// h current cell output [bS x inSize], that is at current time step t
// c current cell state [bS x inSize], that is at current time step t
const int inSize = x->sizeAt(1); // inSize - number of features
auto z = mmul(*x, *w); // [bS x 3*inSize]
// forget gate = sigmoid(x*Wf + bf)
auto f = sigmoid(z({0,0, inSize, 2*inSize}) + (*b)({0, inSize}));
// reset gate = sigmoid(x*Wr + br)
auto r = sigmoid(z({0,0, 2*inSize, 3*inSize}) + (*b)({inSize, 2*inSize}));
// ◦ means element-wise product or so called Hadamard product
// current cell state = f◦c0 + (1 - f)◦(x*Wc)
c->assign(f * (*c0) + (1.f - f) * z({0, 0 ,0, inSize}) );
// *c = f*(*c0 - z({},{0, inSize})) + z({{},{0, inSize}});
// current cell output = r◦activation(c) + (1 - r)◦x
h->assign( r * activation(*c) + (1.f - r) * (*x) );
// *h = r * (activation<T>(c) - *x) + *x;
}
//////////////////////////////////////////////////////////////////////////
void sruTimeLoop(sd::LaunchContext * context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {
// x input [bS x inSize x time]
// c0 initial cell state (at time step = 0) [bS x inSize],
// w weights, [3*inSize x inSize]
// b biases, [2*inSize]
// h cell outputs [bS x inSize x time]
// c cell states [bS x inSize x time]
auto wT = w->transpose(); // [3*inSize x inSize] -> [inSize x 3*inSize]
const int time = x->sizeAt(2);
NDArray ct_1(*c0);
// loop through time steps
for (int t = 0; t < time; ++t) {
auto xt = (*x)({0,0, 0,0, t,t+1});
auto ht = (*h)({0,0, 0,0, t,t+1});
auto ct = (*c)({0,0, 0,0, t,t+1});
helpers::sruCell(context, &xt, &ct_1, &wT, b, &ht, &ct);
ct_1.assign(ct);
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBICuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
void* vht, const Nd4jLong* htShapeInfo,
void* vct, const Nd4jLong* ctShapeInfo) {
// inputs:
// x [time, bS, 2*K]
// wi [time, bS, 6*K], wi = mmul(x, weights);
// b [4*K]
// c0 [bS, 2*K]
// mask [bS, 2*K], optional
// outputs
// ht [time, bS, 2*K]
// ct [time, bS, 2*K]
const auto x = reinterpret_cast<const T*>(vx);
const auto wi = reinterpret_cast<const T*>(vwi);
const auto b = reinterpret_cast<const T*>(vb);
const auto c0 = reinterpret_cast<const T*>(vc0);
const auto mask = reinterpret_cast<const T*>(vmask);
auto ht = reinterpret_cast<T*>(vht);
auto ct = reinterpret_cast<T*>(vct);
const int rank = 3;
__shared__ int time, K, *sharedMem;
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
time = xShapeInfo[1];
K = xShapeInfo[3] / 2;
len = xShapeInfo[2] * xShapeInfo[3]; // 2*K*bS
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto coords = sharedMem + threadIdx.x * rank;
if(tid >= len)
return;
shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1); // loop through last two dimensions of x : {bS, 2*K}
const auto maskOffst = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
const auto c0Offset = shape::getOffset(c0ShapeInfo, coords + 1);
const auto bFOffset = shape::getOffset(bShapeInfo, coords + 2);
const auto bROffset = bFOffset + 2 * K * bShapeInfo[2]; // 2*K*b_stride
const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
const T bF = b[bFOffset];
const T bR = b[bROffset];
T c0Val = c0[c0Offset];
const bool flip = coords[2] >= K;
if(flip)
coords[0] = time - 1;
else
coords[0] = 0;
auto xOffset = shape::getOffset(xShapeInfo, coords);
auto htOffset = shape::getOffset(htShapeInfo, coords);
auto ctOffset = shape::getOffset(ctShapeInfo, coords);
coords[2] *= 3;
auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3]; // add last stride
auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3]; // add last stride
// time loop
for (uint t = 0; t < time; ++t) {
// evaluate sigmoids
T ft = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
T rt = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));
c0Val = (c0Val - wi[wiOffset0]) * ft + wi[wiOffset0];
ct[ctOffset] = c0Val;
T val = sd::math::nd4j_tanh<T, T>(c0Val);
T xVal = x[xOffset];
ht[htOffset] = (val * maskVal - xVal) * rt + xVal;
if(flip) {
xOffset -= xShapeInfo[rank + 1]; // first stride, corresponds to time step
htOffset -= htShapeInfo[rank + 1];
ctOffset -= htShapeInfo[rank + 1];
wiOffset0 -= wiShapeInfo[rank + 1];
wiOffset1 -= wiShapeInfo[rank + 1];
wiOffset2 -= wiShapeInfo[rank + 1];
}
else {
xOffset += xShapeInfo[rank + 1]; // first stride, corresponds to time step
htOffset += htShapeInfo[rank + 1];
ctOffset += htShapeInfo[rank + 1];
wiOffset0 += wiShapeInfo[rank + 1];
wiOffset1 += wiShapeInfo[rank + 1];
wiOffset2 += wiShapeInfo[rank + 1];
}
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBICudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
void* vht, const Nd4jLong* htShapeInfo,
void* vct, const Nd4jLong* ctShapeInfo) {
sruBICuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vht, htShapeInfo, vct, ctShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
ND4J_LOCAL void sruBI(sd::LaunchContext * context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* mask, NDArray* ht, NDArray* ct) {
// x = x * mask
if(mask)
x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x); // apply mask
// U = x * w
NDArray wi = mmul(*x, *w); // U [time x bS x 6*K]
PointersManager manager(context, "sru_bi");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock; // loop through last two dimensions of x array -> bS, 2*K
const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;
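// Dynamic shared memory holds one rank-sized int coordinate array per thread
// (threadsPerBlock * rank ints, used in the kernel as sharedMem + threadIdx.x * rank);
// the extra 128 bytes are presumably headroom for alignment.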
NDArray::prepareSpecialUse({ht, ct}, {x, &wi, b, c0, mask});
BUILD_SINGLE_SELECTOR(x->dataType(), sruBICudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ht->specialBuffer(), ht->specialShapeInfo(), ct->specialBuffer(), ct->specialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({ht, ct}, {x, &wi, b, c0, mask});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBIBPCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
const void* vct, const Nd4jLong* ctShapeInfo,
const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
void* vgradI, const Nd4jLong* gradIShapeInfo,
void* vgradWi, const Nd4jLong* gradWiShapeInfo,
void* vgradB, const Nd4jLong* gradBShapeInfo,
void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {
// inputs:
// x [time, bS, 2*K]
// wi [time, bS, 6*K], wi = mmul(x, weights);
// b [4*K]
// c0 [bS, 2*K]
// mask [bS, 2*K], optional
// ct [time, bS, 2*K]
// gradHt [time, bS, 2*K]
// gradCt [bS, 2*K]
// outputs
// gradI [time, bS, 2*K]
// gradWi [time, 2*K, 6*K]
// gradB [bS, 4*K]
// gradC0 [bS, 2*K]
const auto x = reinterpret_cast<const T*>(vx);
const auto wi = reinterpret_cast<const T*>(vwi);
const auto b = reinterpret_cast<const T*>(vb);
const auto c0 = reinterpret_cast<const T*>(vc0);
const auto mask = reinterpret_cast<const T*>(vmask);
const auto ct = reinterpret_cast<const T*>(vct);
const auto gradHt = reinterpret_cast<const T*>(vgradHt);
const auto gradCt = reinterpret_cast<const T*>(vgradCt);
auto gradI = reinterpret_cast<T*>(vgradI);
auto gradWi = reinterpret_cast<T*>(vgradWi);
auto gradB = reinterpret_cast<T*>(vgradB);
auto gradC0 = reinterpret_cast<T*>(vgradC0);
const int rank = 3;
__shared__ int time, K, *sharedMem;
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
time = xShapeInfo[1];
K = xShapeInfo[3] / 2;
len = xShapeInfo[2] * xShapeInfo[3]; // 2*K*bS
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto coords = sharedMem + threadIdx.x * rank;
if(tid >= len)
return;
shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1); // loop through last two dimensions of x : {bS, 2*K}
const auto maskOffst = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
const auto c0Offset = shape::getOffset(c0ShapeInfo, coords + 1);
const auto gradCtOffset = shape::getOffset(gradCtShapeInfo, coords + 1);
const auto gradC0Offset = shape::getOffset(gradC0ShapeInfo, coords + 1);
const auto bFOffset = shape::getOffset(bShapeInfo, coords + 2);
const auto bROffset = bFOffset + 2 * K * bShapeInfo[2]; // 2*K*b_stride
// const auto gradBFOffset = shape::getOffset(gradBShapeInfo, coords + 1);
const auto gradBFOffset = coords[1] * gradBShapeInfo[3] / 2 + coords[2] * gradBShapeInfo[4];
const auto gradBROffset = gradBFOffset + gradBShapeInfo[3];
const bool flip = coords[2] >= K;
if(flip)
coords[0] = 0;
else
coords[0] = time - 1;
auto xOffset = shape::getOffset(xShapeInfo, coords);
auto ctOffset = shape::getOffset(ctShapeInfo, coords);
auto gradIOffset = shape::getOffset(gradIShapeInfo, coords);
auto gradHtOffset = shape::getOffset(gradHtShapeInfo, coords);
coords[2] *= 3;
auto gradWiOffset0 = shape::getOffset(gradWiShapeInfo, coords);
auto gradWiOffset1 = gradWiOffset0 + gradWiShapeInfo[rank + 3]; // add last stride
auto gradWiOffset2 = gradWiOffset1 + gradWiShapeInfo[rank + 3]; // add last stride
auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3]; // add last stride
auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3]; // add last stride
const T xVal = x[xOffset];
const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
const T c0Val = c0[c0Offset];
const T bF = b[bFOffset];
const T bR = b[bROffset];
T gradCtVal = gradCt[gradCtOffset];
T gbF = 0.f;
T gbR = 0.f;
// time loop
for (uint t = 0; t < time; ++t) {
// evaluate sigmoids
T ft = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
T rt = (1.f)/(1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));
T val = sd::math::nd4j_tanh<T,T>(ct[ctOffset]);
T prevVal;
if(t < time-1)
prevVal = ct[ctOffset += flip ? ctShapeInfo[rank + 1] : -ctShapeInfo[rank + 1]];
else
prevVal = c0Val;
// grad wrt input
gradI[gradIOffset] = gradHt[gradHtOffset] - gradHt[gradHtOffset] * rt ;
// grad wrt rt, wiR and bR
T grt = gradHt[gradHtOffset] * (val * maskVal - x[xOffset]) * (rt - rt * rt);
gradWi[gradWiOffset2] = grt;
gbR += grt;
// grad wrt state
T gradC0Val = gradHt[gradHtOffset] * maskVal * (rt - rt * val * val) + gradCtVal;
// grad wrt wi0
gradWi[gradWiOffset0] = gradC0Val - gradC0Val * ft;
// grad wrt ft, wi1, and bF
T gft = gradC0Val * (prevVal - wi[wiOffset0]) * (ft - ft * ft);
gradWi[gradWiOffset1] = gft;
gbF += gft;
// grad wrt c_previous
gradCtVal = gradC0Val * ft;
if(flip) {
xOffset += xShapeInfo[rank + 1]; // first stride, corresponds to time step
gradHtOffset += gradHtShapeInfo[rank + 1];
gradIOffset += gradIShapeInfo[rank + 1];
wiOffset0 += wiShapeInfo[rank + 1];
wiOffset1 += wiShapeInfo[rank + 1];
wiOffset2 += wiShapeInfo[rank + 1];
gradWiOffset0 += gradWiShapeInfo[rank + 1];
gradWiOffset1 += gradWiShapeInfo[rank + 1];
gradWiOffset2 += gradWiShapeInfo[rank + 1];
}
else {
xOffset -= xShapeInfo[rank + 1]; // first stride, corresponds to time step
gradHtOffset -= gradHtShapeInfo[rank + 1];
gradIOffset -= gradIShapeInfo[rank + 1];
wiOffset0 -= wiShapeInfo[rank + 1];
wiOffset1 -= wiShapeInfo[rank + 1];
wiOffset2 -= wiShapeInfo[rank + 1];
gradWiOffset0 -= gradWiShapeInfo[rank + 1];
gradWiOffset1 -= gradWiShapeInfo[rank + 1];
gradWiOffset2 -= gradWiShapeInfo[rank + 1];
}
}
gradB[gradBFOffset] = gbF;
gradB[gradBROffset] = gbR;
gradC0[gradC0Offset] = gradCtVal;
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBIBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vwi, const Nd4jLong* wiShapeInfo,
const void* vb, const Nd4jLong* bShapeInfo,
const void* vc0, const Nd4jLong* c0ShapeInfo,
const void* vmask, const Nd4jLong* maskShapeInfo,
const void* vct, const Nd4jLong* ctShapeInfo,
const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
void* vgradI, const Nd4jLong* gradIShapeInfo,
void* vgradWi, const Nd4jLong* gradWiShapeInfo,
void* vgradB, const Nd4jLong* gradBShapeInfo,
void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {
sruBIBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vct, ctShapeInfo, vgradHt, gradHtShapeInfo, vgradCt, gradCtShapeInfo, vgradI, gradIShapeInfo, vgradWi, gradWiShapeInfo, vgradB, gradBShapeInfo, vgradC0, gradC0ShapeInfo);
}
BUILD_SINGLE_TEMPLATE(template void sruBIBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vwi, const Nd4jLong* wiShapeInfo, const void* vb, const Nd4jLong* bShapeInfo, const void* vc0, const Nd4jLong* c0ShapeInfo, const void* vmask, const Nd4jLong* maskShapeInfo, const void* vct, const Nd4jLong* ctShapeInfo, const void* vgradHt, const Nd4jLong* gradHtShapeInfo, const void* vgradCt, const Nd4jLong* gradCtShapeInfo, void* vgradI, const Nd4jLong* gradIShapeInfo, void* vgradWi, const Nd4jLong* gradWiShapeInfo, void* vgradB, const Nd4jLong* gradBShapeInfo, void* vgradC0, const Nd4jLong* gradC0ShapeInfo), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void sruBIBP(sd::LaunchContext* context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* ct,
const NDArray* gradCt, const NDArray* gradHt, const NDArray* mask,
NDArray* gradI, NDArray* gradW, NDArray* gradB, NDArray* gradC0) {
// x = x * mask
if(mask)
x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x); // apply mask
// U = x * w
NDArray wi = mmul(*x, *w); // U [time x bS x 6*K]
const int time = x->sizeAt(0);
const int bS = x->sizeAt(1);
const int K = x->sizeAt(2) / 2;
NDArray gradBias(x->ordering(), {bS, 4*K}, x->dataType(), context);
NDArray gradWi (x->ordering(), {time, bS, 6*K}, x->dataType(), context);
PointersManager manager(context, "sru_bi_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock; // loop through last two dimensions of x array -> bS, 2*K
const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;
NDArray::prepareSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});
BUILD_SINGLE_SELECTOR(x->dataType(), sruBIBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ct->specialBuffer(), ct->specialShapeInfo(), gradHt->specialBuffer(), gradHt->specialShapeInfo(), gradCt->specialBuffer(), gradCt->specialShapeInfo(), gradI->specialBuffer(), gradI->specialShapeInfo(), gradWi.specialBuffer(), gradWi.specialShapeInfo(), gradBias.specialBuffer(), gradBias.specialShapeInfo(), gradC0->specialBuffer(), gradC0->specialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});
manager.synchronize();
// gradB
gradBias.reduceAlongDimension(reduce::Sum, *gradB, {0}); // [4*K]
// gradW
x->permutei({0, 2, 1}); // [time, bS, 2*K] -> [time, 2*K, bS]
MmulHelper::mmul(x, &gradWi, gradW, 1., 0.); // [time, 2*K, bS] x [time, bS , 6*K] = [time, 2*K, 6*K]
}
}
}
}
|
f841151423b77a9dd13cf8b02b077318194237dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transposeSmemDyn.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(float));
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( transposeSmemDyn), dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( transposeSmemDyn), dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( transposeSmemDyn), dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f841151423b77a9dd13cf8b02b077318194237dc.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transposeSmemDyn.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(float));
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transposeSmemDyn<<<gridBlock,threadBlock>>>(out,in,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transposeSmemDyn<<<gridBlock,threadBlock>>>(out,in,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transposeSmemDyn<<<gridBlock,threadBlock>>>(out,in,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c6c674308456f05a2f30ce1faff397ac0ef245b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereotgv.h"
__global__
void TgvLimitRangeKernel(float* src, float upperLimit,
int width, int height, int stride,
float *dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
/*if (src[pos] < 0.0f) {
dst[pos] = 0.0f;
}*/
if (src[pos] > upperLimit) {
dst[pos] = upperLimit;
}
else {
dst[pos] = src[pos];
}
}
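// Host wrapper: launches TgvLimitRangeKernel over the whole w x h image; values above
// upperLimit are capped, everything else is copied through unchanged.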
void StereoTgv::LimitRange(float *src, float upperLimit, int w, int h, int s, float *dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvLimitRangeKernel << <blocks, threads >> > (src, upperLimit, w, h, s, dst);
}
|
c6c674308456f05a2f30ce1faff397ac0ef245b0.cu
|
#include "stereotgv.h"
__global__
void TgvLimitRangeKernel(float* src, float upperLimit,
int width, int height, int stride,
float *dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
/*if (src[pos] < 0.0f) {
dst[pos] = 0.0f;
}*/
if (src[pos] > upperLimit) {
dst[pos] = upperLimit;
}
else {
dst[pos] = src[pos];
}
}
void StereoTgv::LimitRange(float *src, float upperLimit, int w, int h, int s, float *dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvLimitRangeKernel << <blocks, threads >> > (src, upperLimit, w, h, s, dst);
}
|
9f2047b37cb8c83c6a620207d5627f857125b0c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Mark Gates
@author Azzam Haidar
@generated from zlacpy.cu normal z -> s, Fri Mar 13 15:22:46 2015
*/
#include "common_magma.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
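// For example, with BLK_X = BLK_Y = 64 a 1000 x 1000 matrix is covered by a
// ceil(1000/64) x ceil(1000/64) = 16 x 16 grid of 64-thread blocks.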
static __device__
void slacpy_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_lower_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_lower_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slacpy_full_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_lower_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_upper_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as SLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use hipMemcpy or hipMemcpy2D ?
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
hipLaunchKernelGGL(( slacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/**
@see magmablas_slacpy_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb )
{
magmablas_slacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( slacpy_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( slacpy_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
else {
hipLaunchKernelGGL(( slacpy_full_kernel_batched) , dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
}
|
9f2047b37cb8c83c6a620207d5627f857125b0c7.cu
|
/*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Mark Gates
@author Azzam Haidar
@generated from zlacpy.cu normal z -> s, Fri Mar 13 15:22:46 2015
*/
#include "common_magma.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which covers up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
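// Editor's note (worked arithmetic, not in the original source): with
// BLK_X = 64 and max_blocks = 65535, one super block spans
//     super_NB = max_blocks * BLK_X = 65535 * 64 = 4194240
// rows or columns, which is where the 4194240 x 4194240 figure above comes from;
// anything larger is tiled by the super_grid loops in magmablas_slacpy_q below.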
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
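/*
    Editor's sketch (not part of the original MAGMA source): for a matrix small
    enough to fit in a single super block, the decomposition above reduces to the
    per-tile launch geometry used in magmablas_slacpy_q below, assuming
    magma_ceildiv(a,b) == (a + b - 1)/b:

        dim3 threads( BLK_X, 1 );
        dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
        slacpy_full_kernel<<< grid, threads, 0, queue >>>( m, n, dA, ldda, dB, lddb );
*/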
static __device__
void slacpy_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
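/*
    Editor's example (assumes BLK_X = BLK_Y = 64 and n >= 64; not in the original
    source): the thread block at blockIdx = (2,0) owns rows 128..191 and columns
    0..63. Every row index exceeds every column index, so the whole tile lies
    below the diagonal and is copied with the unrolled "full" loop. A block with
    blockIdx.x == blockIdx.y straddles the diagonal and falls back to the guarded
    per-column loop instead.
*/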
static __device__
void slacpy_lower_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_lower_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slacpy_full_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_lower_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void slacpy_upper_kernel_batched(
int m, int n,
float const * const *dAarray, int ldda,
float **dBarray, int lddb )
{
int batchid = blockIdx.z;
slacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as SLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slacpy_lower_kernel<<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
slacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slacpy_upper_kernel<<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
slacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use cudaMemcpy or cudaMemcpy2D ?
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
slacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/**
@see magmablas_slacpy_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb )
{
magmablas_slacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchCount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray REAL* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr const dAarray[], magma_int_t ldda,
magmaFloat_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
slacpy_lower_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
slacpy_upper_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
else {
slacpy_full_kernel_batched <<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
}
|
94abe6c63ce2e0221bc948223d457b04c6f51db9.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <limits>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <set>
#include <utility>
#include <cfloat>
#include <time.h>
#include <windows.h>
#include <string>
#include "helper_functions.h"
#include "helper_cuda.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL)
{
extern __shared__ int sums[];
int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
int lane_id = id % warpSize;
// determine a warp_id within a block
int warp_id = threadIdx.x / warpSize;
// Below is the basic structure of using a shfl instruction
// for a scan.
// Record "value" as a variable - we accumulate it along the way
int value = data[id];
// Now accumulate in log steps up the chain
// compute sums, with another thread's value who is
// distance delta away (i). Note
// those threads where the thread 'i' away would have
// been out of bounds of the warp are unaffected. This
// creates the scan sum.
#pragma unroll
for (int i = 1; i <= width; i *= 2)
{
int n = __shfl_up(value, i, width);
if (lane_id >= i) value += n;
}
// value now holds the scan value for the individual thread
// next sum the largest values for each warp
// write the sum of the warp to smem
if (threadIdx.x % warpSize == warpSize - 1)
{
sums[warp_id] = value;
}
__syncthreads();
//
// scan sum the warp sums
// the same shfl scan operation, but performed on warp sums
//
if (warp_id == 0 && lane_id < (blockDim.x / warpSize))
{
int warp_sum = sums[lane_id];
for (int i = 1; i <= width; i *= 2)
{
int n = __shfl_up(warp_sum, i, width);
if (lane_id >= i) warp_sum += n;
}
sums[lane_id] = warp_sum;
}
__syncthreads();
// perform a uniform add across warps in the block
// read neighbouring warp's sum and add it to threads value
int blockSum = 0;
if (warp_id > 0)
{
blockSum = sums[warp_id - 1];
}
value += blockSum;
// Now write out our result
data[id] = value;
// last thread has the sum; write out the block's sum
if (partial_sums != NULL && threadIdx.x == blockDim.x - 1)
{
partial_sums[blockIdx.x] = value;
}
}
// Uniform add: add partial sums array
__global__ void uniform_add(int *data, int *partial_sums, int len)
{
__shared__ int buf;
int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
if (id > len) return;
if (threadIdx.x == 0)
{
buf = partial_sums[blockIdx.x];
}
__syncthreads();
data[id] += buf;
}
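/*
    Editor's note (summary of the pipeline used in shuffle_simple_test below,
    not additional functionality): a full inclusive scan of d_data is the
    three-kernel sequence
        shfl_scan_test<<<gridSize, blockSize, shmem_sz>>>(d_data, 32, d_partial_sums);
        shfl_scan_test<<<p_gridSize, p_blockSize, shmem_sz>>>(d_partial_sums, 32);
        uniform_add<<<gridSize - 1, blockSize>>>(d_data + blockSize, d_partial_sums, n_elements);
    i.e. scan each block, scan the per-block sums, then add each preceding
    block's total onto the elements of the following blocks.
*/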
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor)
{
return ((dividend % divisor) == 0) ?
(dividend / divisor) :
(dividend / divisor + 1);
}
bool CPUverify(int *h_data, int *h_result, int n_elements)
{
// cpu verify
for (int i = 0; i<n_elements - 1; i++)
{
h_data[i + 1] = h_data[i] + h_data[i + 1];
}
int diff = 0;
for (int i = 0; i<n_elements; i++)
{
diff += h_data[i] - h_result[i];
}
printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
bool bTestResult = false;
if (diff == 0) bTestResult = true;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int j = 0; j<100; j++)
for (int i = 0; i<n_elements - 1; i++)
{
h_data[i + 1] = h_data[i] + h_data[i + 1];
}
sdkStopTimer(&hTimer);
double cput = sdkGetTimerValue(&hTimer);
printf("CPU sum (naive) took %f ms\n", cput / 100);
return bTestResult;
}
bool shuffle_simple_test(int argc, char **argv)
{
int *h_data, *h_partial_sums, *h_result;
int *d_data, *d_partial_sums;
const int n_elements = 65036;
int sz = sizeof(int)*n_elements;
//int cuda_device = 0;
//printf("Starting shfl_scan\n");
//// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
//hipDeviceProp_t deviceProp;
//checkCudaErrors(hipGetDevice(&cuda_device));
//checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
//printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
// deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
//// __shfl intrinsic needs SM 3.0 or higher
//if (deviceProp.major < 3)
//{
// printf("> __shfl() intrinsic requires device SM 3.0+\n");
// printf("> Waiving test.\n");
// exit(EXIT_WAIVED);
//}
checkCudaErrors(hipHostMalloc((void **)&h_data, sizeof(int)*n_elements));
checkCudaErrors(hipHostMalloc((void **)&h_result, sizeof(int)*n_elements));
//initialize data:
printf("Computing Simple Sum test\n");
printf("---------------------------------------------------\n");
printf("Initialize test data [1, 1, 1...]\n");
for (int i = 0; i<n_elements; i++)
{
h_data[i] = 1;
}
int blockSize = 256;
int gridSize = n_elements / blockSize + 1;
int nWarps = blockSize / 32;
int shmem_sz = nWarps * sizeof(int);
int n_partialSums = n_elements / blockSize + 1;
int partial_sz = n_partialSums*sizeof(int);
printf("Scan summation for %d elements, %d partial sums\n",
n_elements, n_elements / blockSize);
int p_blockSize = ::min(n_partialSums, blockSize);
int p_gridSize = iDivUp(n_partialSums, p_blockSize);
printf("Partial summing %d elements with %d blocks of size %d\n",
n_partialSums, p_gridSize, p_blockSize);
// initialize a timer
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
float et = 0;
float inc = 0;
checkCudaErrors(hipMalloc((void **)&d_data, sz));
checkCudaErrors(hipMalloc((void **)&d_partial_sums, partial_sz));
checkCudaErrors(hipMemset(d_partial_sums, 0, partial_sz));
checkCudaErrors(hipHostMalloc((void **)&h_partial_sums, partial_sz));
checkCudaErrors(hipMemcpy(d_data, h_data, sz, hipMemcpyHostToDevice));
checkCudaErrors(hipEventRecord(start, 0));
shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(d_data, 32, d_partial_sums);
shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(d_partial_sums, 32);
uniform_add << <gridSize - 1, blockSize >> >(d_data + blockSize, d_partial_sums, n_elements);
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&inc, start, stop));
et += inc;
checkCudaErrors(hipMemcpy(h_result, d_data, sz, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_partial_sums, d_partial_sums, partial_sz,
hipMemcpyDeviceToHost));
printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
printf("Time (ms): %f\n", et);
printf("%d elements scanned in %f ms -> %f MegaElements/s\n",
n_elements, et, n_elements / (et / 1000.0f) / 1000000.0f);
bool bTestResult = CPUverify(h_data, h_result, n_elements);
checkCudaErrors(hipHostFree(h_data));
checkCudaErrors(hipHostFree(h_result));
checkCudaErrors(hipHostFree(h_partial_sums));
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_partial_sums));
return bTestResult;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
using namespace std;
#define INF 1000000000
#define BS 512
#define BEGIN_ATOMIC bool isSet = false; do { if (isSet = atomicCAS(mutex, 0, 1) == 0) {
#define END_ATOMIC }if (isSet){*mutex = 0;}} while (!isSet);
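// Editor's note (expansion of the two macros above, not additional code): a
// critical section written as BEGIN_ATOMIC ... END_ATOMIC spins on a global
// mutex with atomicCAS, roughly:
//     bool isSet = false;
//     do {
//         if (isSet = (atomicCAS(mutex, 0, 1) == 0)) {
//             /* critical section */
//         }
//         if (isSet) { *mutex = 0; }
//     } while (!isSet);
// Only the thread that swings the mutex from 0 to 1 runs the body, then
// releases it by writing 0 back.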
#define GET_THREAD_ID (blockIdx.x * blockDim.x + threadIdx.x);
#include "hip/hip_runtime_api.h"
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
//if (abort) exit(code);
}
}
//! used for calculating time in CPU
double PCFreq = 0.0;
__int64 CounterStart = 0;
float minTime;
float maxTime;
//! starts the counter (for CPU)
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li))
std::cout << "QueryPerformanceFrequency failed!\n";
PCFreq = double(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
//! gives the elapse time from the call of StartCounter()
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
__device__ volatile int sem = 0;
__device__ void acquire_semaphore(volatile int *lock){
while (atomicCAS((int *)lock, 0, 1) != 0);
}
__device__ void release_semaphore(volatile int *lock){
*lock = 0;
__threadfence();
}
/**********************************************************************************************************************************/
__global__ void reweightKernel(int* bfWeights, int* d_edgeIndex, int* d_edges, int* in_costs, int* d_out_costs, int* numOfThreads)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
int edgeStart = d_edgeIndex[i];
int edgeEnd = d_edgeIndex[i + 1];
int u = bfWeights[i];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = d_edges[m]; // neighbor
int w = in_costs[m]; // its cost
int v = bfWeights[adj];
d_out_costs[m] = w + u - v;
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void spawnVertices(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* itNo, int* source,
int* F1, int* F2, int *head1, int* head2, int currIt, int* mutex)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *head1;
__syncthreads();
if (i < s_data[0])
{
int nodeIndex = F1[i];
int edgeStart = edgeIndex[nodeIndex];
int edgeEnd = edgeIndex[nodeIndex + 1];
for (int e = edgeStart; e < edgeEnd; e++)
{
int adj = edges[e];
//printf("%d\n", adj);
int newCost = nodeW[nodeIndex] + costs[e];
int outDegree = edgeIndex[adj + 1] - edgeIndex[adj];
if (nodeIndex == adj)
continue;
BEGIN_ATOMIC
if (newCost < nodeW[adj])
{
nodeW[adj] = newCost;
nodeParent[adj] = nodeIndex;
if (itNo[adj] != currIt && outDegree > 0){
//printf(" %d -- %d\n", adj, nodeIndex);
*(F2 + *head2) = adj;
*head2 += 1;
itNo[adj] = currIt;
}
}
END_ATOMIC
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void BF(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* itNo, int* source,
int* F1, int* F2, int *head1, int* head2, int* mutex, int* n)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *n;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s){
nodeW[i] = 0;
nodeParent[i] = -2;
}
else{
nodeW[i] = INF;
nodeParent[i] = -1;
}
itNo[i] = -1;
if (i == 0){
*(F1 + *head1) = s;
*head1 = 1;
}
__syncthreads();
if (i == 0){
int ss = 0;
while (true){
int h1 = *head1;
if (h1 == 0)
break;
int numOfThreads = BS;
int numOfBlocks = *head1 / numOfThreads + (*head1%numOfThreads == 0 ? 0 : 1);
//for (int q = 0; q < h1; q++)
//printf("%d ", F1[q]);
//printf("\n\n");
spawnVertices << <numOfBlocks, numOfThreads >> >(edgeIndex, edges, costs, nodeW, nodeParent, itNo, source, F1, F2, head1, head2, ss, mutex);
hipDeviceSynchronize();
int *temp = F1;
F1 = F2;
F2 = temp;
*head1 = *head2;
*head2 = 0;
ss++;
}
}
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = 512;
int numOfBlocks = s_data[0] / threadsPerBlock + (s_data[0] % threadsPerBlock == 0 ? 0 : 1);
reweightKernel << <numOfBlocks, threadsPerBlock >> > (nodeW, edgeIndex, edges, costs, costs, n);
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void relaxKernel(int* edgeIndex, int* edges, int*costs, int* nodeW, int* nodeParent, int* F, int* U, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
if (F[i] == 1)
{
int edgeStart = edgeIndex[i];
int edgeEnd = edgeIndex[i + 1];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = edges[m]; // neighbor
int cost = costs[m]; // its cost
if (U[adj] == 1)
{
//nodeParent[adj] = i;
/* TODO: use a proper atomic update here */
//BEGIN_ATOMIC
// get the minimum value for relaxing
atomicMin(nodeW + adj, nodeW[i] + cost);
//nodeW[adj] = nodeW[adj] < (nodeW[i] + cost) ? nodeW[adj] : (nodeW[i] + cost);
//END_ATOMIC
}
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void updateKernel(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* U, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
F[i] = 0;
if (U[i] == 1 && nodeW[i] <= *threshold)
{
F[i] = 1;
U[i] = 0;
//printf(" %d\n", i);
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void updateKernelQ(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* headF, int* U, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
if (i == 0)
{
*headF = 0;
}
__syncthreads();
if (U[i] == 1 && nodeW[i] <= *threshold)
{
U[i] = 0;
// atomicAdd(headF, 1);
atomicExch(F + atomicAdd(headF, 1), i);
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void relaxKernelQ(int* edgeIndex, int* edges, int*costs, int* nodeW, int* nodeParent, int* F, int* headF, int* U)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *headF;
__syncthreads();
if (i < s_data[0])
{
int nodeIdx = F[i];
int edgeStart = edgeIndex[nodeIdx];
int edgeEnd = edgeIndex[nodeIdx + 1];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = edges[m]; // neighbor
int cost = costs[m]; // its cost
if (U[adj] == 1)
{
/* TODO: use a proper atomic update here */
// BEGIN_ATOMIC
// get the minimum value for relaxing
nodeParent[adj] = nodeIdx;
//nodeW[adj] = nodeW[adj] < (nodeW[nodeIdx] + cost) ? nodeW[adj] : (nodeW[nodeIdx] + cost);
//END_ATOMIC
atomicMin(nodeW + adj, nodeW[nodeIdx] + cost);
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void computeDeltaUKernel(int *edgeIndex, int *edges, int * costs, int* deltas, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
int edgeStart = edgeIndex[i];
int edgeEnd = edgeIndex[i + 1];
int minVal = INF;
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int cost = costs[m]; // its cost
minVal = minVal < cost ? minVal : cost;
}
deltas[i] = minVal;
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void
reduce3(int *g_idata, int *g_odata, unsigned int n, unsigned int n2)
{
extern __shared__ int s_type[];
int *sdata = s_type;
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int myMin = (i < n) ? g_idata[i] : INF;
if (i + blockDim.x < n)
{
int tempMin = g_idata[i + blockDim.x];
myMin = myMin < tempMin ? myMin : tempMin;
}
sdata[tid] = myMin;
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s)
{
int temp = sdata[tid + s];
sdata[tid] = myMin = myMin < temp ? myMin : temp;
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
__syncthreads();
// naive final pass: thread 0 reduces the per-block minima serially
if (blockIdx.x * blockDim.x + threadIdx.x == 0){
int minnak = g_odata[0];
for (int j = 1; j < n2; j++)
if (minnak > g_odata[j])
minnak = g_odata[j];
g_odata[0] = minnak;
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void minimumKernel(int *edgeIndex, int *edges, int * costs, int* deltas, int* U, int* nodeW, int* g_odata, int* numOfThreads)
{
unsigned int i = GET_THREAD_ID
unsigned int tid = threadIdx.x;
extern __shared__ int amca[];
int * s_data = amca;
if (i < *numOfThreads)
{
if (U[i] == 1)
{
if (deltas[i] == INF)
s_data[tid] = INF;
else
s_data[tid] = nodeW[i] + deltas[i];
}
else
{
s_data[tid] = INF;
}
}
else
{
s_data[tid] = INF;
}
__syncthreads();
// Reduce2 Cuda SDK
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
//printf("amca : %d\n", blockDim.x);
if (s_data[tid] > s_data[tid + s])
{
s_data[tid] = s_data[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
{
g_odata[blockIdx.x] = s_data[0];
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void fillQPrefix(int* F, int* Qcondition, int* prefixSum, int n_elements,int *headF)
{
unsigned int i = GET_THREAD_ID;
if (i < n_elements)
{
if (i == 0)
{
*headF = prefixSum[n_elements - 1];
}
if (Qcondition[i] == 1)
{
F[(prefixSum[i] - 1)] = i;
}
}
}
__global__ void updateKernelPrefix(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* headF, int* U, int* isInQ, int* partialSums, int* Qcondition, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int nData = *numOfThreads;
unsigned int i = GET_THREAD_ID;
int* s_data = shared;
if (threadIdx.x == 0)
s_data[0] = nData;
__syncthreads();
bool cond_isInQ, cond_isInQ2;
if (i < s_data[0])
{
cond_isInQ = (U[i] == 1) && (nodeW[i] <= *threshold);
if (cond_isInQ)
{
U[i] = 0;
isInQ[i] = 1;
Qcondition[i] = 1;
}
else
{
isInQ[i] = 0;
Qcondition[i] = 0;
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void Dijkstra(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* U, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
F[i] = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
F[i] = 0;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
hipStream_t s;
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
//threshold = INF;
while (true)
{
*threshold = INF;
relaxKernel << < numOfBlocks, threadsPerBlock, 0, s >> > (edgeIndex, edges, costs, nodeW, nodeParent, F, U, numOfThreads);
minimumKernel << < numOfBlocks, threadsPerBlock, 4096, s >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 1024, s >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
updateKernel << < numOfBlocks, threadsPerBlock, 0, s >> >(edgeIndex, edges, costs, nodeW, F, U, threshold, numOfThreads);
hipDeviceSynchronize();
//printf("threshold = %f \n", *threshold);
if (*threshold == INF)
{
break;
}
//printf("\n*************************************************************************\n");
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void DijkstraQ(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* headF, int* U, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
*headF = 0;
F[*headF] = i;
*headF = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
//printf("numOfBlocks: %d \n", numOfBlocks);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, deltas, numOfThreads);
while (true)
{
*threshold = INF;
int threadsPerBlockQ = threadsPerBlock;
int numOfBlocksQ = *headF / threadsPerBlockQ + (*headF % threadsPerBlockQ == 0 ? 0 : 1);
relaxKernelQ << < numOfBlocksQ, threadsPerBlockQ >> >(edgeIndex, edges, costs, nodeW, nodeParent, F, headF, U);
minimumKernel << < numOfBlocks, threadsPerBlock, 16536 >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 4096 >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
updateKernelQ << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, nodeW, F, headF, U, threshold, numOfThreads);
hipDeviceSynchronize();
if (*threshold == INF)
{
break;
}
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void DijkstraPrefix(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* headF, int* U, int* isInQ, int* partialSums, int* Qcondition, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID;
/*if (i == 0)
{
printf("%d\n", *source);
}*/
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
*headF = 0;
F[*headF] = i;
*headF = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
//printf("numOfBlocks: %d \n", numOfBlocks);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, deltas, numOfThreads);
int n_elements = *numOfThreads;
int blockSize = BS;
int gridSize = n_elements / blockSize + ((n_elements % blockSize) == 0 ? 0 : 1);
int nWarps = blockSize / 32;
int shmem_sz = nWarps * sizeof(int);
int n_partialSums = gridSize;
int partial_sz = n_partialSums*sizeof(int);
int p_blockSize = (n_partialSums < blockSize) ? n_partialSums : blockSize;
int p_gridSize = ((n_partialSums % p_blockSize) == 0) ?
(n_partialSums / p_blockSize) :
(n_partialSums / p_blockSize + 1); //iDivUp(n_partialSums, p_blockSize);
while (true)
{
*threshold = INF;
int threadsPerBlockQ = threadsPerBlock;
int numOfBlocksQ = *headF / threadsPerBlockQ + (*headF % threadsPerBlockQ == 0 ? 0 : 1);
relaxKernelQ << < numOfBlocksQ, threadsPerBlockQ >> >(edgeIndex, edges, costs, nodeW, nodeParent, F, headF, U);
minimumKernel << < numOfBlocks, threadsPerBlock, 16536 >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 4096 >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
hipDeviceSynchronize();
updateKernelPrefix << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, nodeW, F, headF, U, isInQ, partialSums, Qcondition, threshold, numOfThreads);
shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(isInQ, 32, partialSums);
shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(partialSums, 32);
uniform_add << <gridSize - 1, blockSize >> >(isInQ + blockSize, partialSums, n_elements);
fillQPrefix << <gridSize, blockSize >> >(F, Qcondition, isInQ, n_elements,headF);
hipDeviceSynchronize();
if (*threshold == INF)
{
//printf("%d %d\n", *headF, *threshold);
break;
}
}
}
}
}
__global__ void cudaWarmup()
{
int i = GET_THREAD_ID
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
struct Edge {
int head;
int cost;
};
using Graph = std::vector<std::vector<Edge>>;
using SingleSP = vector<int>;
using AllSP = vector<vector<int>>;
SingleSP djikstra(const Graph& g, int s) {
SingleSP dist(g.size(), INF);
set<pair<int, int>> frontier;
frontier.insert({ 0, s });
while (!frontier.empty()) {
pair<int, int> p = *frontier.begin();
frontier.erase(frontier.begin());
int d = p.first;
int n = p.second;
// this is our shortest path to n
dist[n] = d;
// now look at all edges out from n to update the frontier
for (auto e : g[n]) {
// update this node in the frontier if we have a shorter path
if (dist[n] + e.cost < dist[e.head]) {
if (dist[e.head] != INF) {
// we've seen this node before, so erase it from the set in order to update it
frontier.erase(frontier.find({ dist[e.head], e.head }));
}
frontier.insert({ dist[n] + e.cost, e.head });
dist[e.head] = dist[n] + e.cost;
}
}
}
return dist;
}
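// Editor's sketch (hypothetical helper, not part of the original file): a tiny
// 3-node graph exercising djikstra() above; the expected distances from node 0
// are {0, 3, 1}, since 0->2->1 (cost 1+2) beats the direct 0->1 edge (cost 4).
static SingleSP djikstraSmokeTest() {
    Graph g(3);
    g[0].push_back({ 1, 4 });   // edge 0 -> 1, cost 4
    g[0].push_back({ 2, 1 });   // edge 0 -> 2, cost 1
    g[2].push_back({ 1, 2 });   // edge 2 -> 1, cost 2
    return djikstra(g, 0);      // {0, 3, 1}
}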
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void GPUDijkstraQ(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* nodeW, int* nodeParent){
int * d_nodeW = 0;
int* d_nodeParent = 0;
int * d_headF = 0;
int * d_head2 = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* d_deltas = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
int numOfThreads = 1024;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize); // parent array written by DijkstraQ and copied back below
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_headF, sizeof(int)); // frontier head counter dereferenced by DijkstraQ
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_threshold, 512 * sizeof(int));
hipMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMemcpy(d_source, &source, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice);
/* TEST DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time with Q: %lf ms\n", elapsedTime);
hipMemcpy(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
std::cout << "**************************************" << std::endl;
for (int i = 0; i < 5; i++){
int next = nodeParent[i];
if (next == -1){
std::cout << "unreachable" << std::endl;
continue;
}
std::cout << i << " ";
while (next != -2){
std::cout << next << " ";
next = nodeParent[next];
}
std::cout << " ----> " << nodeW[i];
std::cout << std::endl;
}
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_headF);
hipFree(d_F);
hipFree(d_U);
hipFree(d_threshold);
hipFree(d_deltas);
hipFree(g_odata);
hipFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void GPUDijkstra(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* nodeW, int* nodeParent){
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* d_deltas = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMemcpy(d_source, &source, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice);
/* RUN DIJKSTRA*/
Dijkstra << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
hipMemcpy(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_U);
hipFree(d_threshold);
hipFree(d_deltas);
hipFree(g_odata);
hipFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstra(int edgeSize, int nodeSize, int source, int head1, int head2, int mutex, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, hipStream_t* stream){
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMemcpyAsync(d_source, &source, sizeof(int), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA */
Dijkstra << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
hipMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost, *stream);
//hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_U);
hipFree(d_threshold);
hipFree(g_odata);
hipFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraQ(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, hipStream_t* stream){
//int* nodeW = allWeights[source];
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_headF, sizeof(int));
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMemcpyAsync(d_source, &source, sizeof(int), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
//hipDeviceSynchronize();
hipMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost, *stream);
/*hipDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_headF);
hipFree(d_U);
hipFree(d_threshold);
hipFree(g_odata);
hipFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraQVerify(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, hipStream_t* stream){
int* nodeW = allWeights[source];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_headF, sizeof(int));
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMemcpyAsync(d_source, &source, sizeof(int), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
//hipDeviceSynchronize();
hipMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost, *stream);
/*hipDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_headF);
hipFree(d_U);
hipFree(d_threshold);
hipFree(g_odata);
hipFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraPrefix(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, hipStream_t* stream){
//int* nodeW = allWeights[source];
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int* d_isInQ = 0;
int* d_partialSums = 0;
int* d_Qcondition = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_headF, sizeof(int));
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMalloc((void**)&d_Qcondition, sizeof(int) * nodeSize);
hipMalloc((void**)&d_isInQ, sizeof(int) * nodeSize);
int n_partialSums = nodeSize / BS + (nodeSize % BS == 0 ? 0 : 1);
hipMalloc((void**)&d_partialSums, sizeof(int) * n_partialSums);
hipMemcpyAsync(d_source, &source, sizeof(int), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraPrefix << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_isInQ, d_partialSums, d_Qcondition, d_threshold, d_deltas, g_odata, d_numOfThreads);
hipMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost, *stream);
/*hipDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_headF);
hipFree(d_U);
hipFree(d_threshold);
hipFree(g_odata);
hipFree(d_numOfThreads);
hipFree(d_isInQ);
hipFree(d_Qcondition);
hipFree(d_partialSums);
}
void oneGPUDijkstraPrefixVerify(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, hipStream_t* stream){
int* nodeW = allWeights[source];
//int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int* d_isInQ = 0;
int* d_partialSums = 0;
int* d_Qcondition = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_F, sizeof(int) * nodeSize);
hipMalloc((void**)&d_U, sizeof(int) * nodeSize);
hipMalloc((void**)&d_headF, sizeof(int));
hipMalloc((void**)&d_threshold, sizeof(int));
hipMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMalloc((void**)&d_Qcondition, sizeof(int) * nodeSize);
hipMalloc((void**)&d_isInQ, sizeof(int) * nodeSize);
int n_partialSums = nodeSize / BS + (nodeSize % BS == 0 ? 0 : 1);
hipMalloc((void**)&d_partialSums, sizeof(int) * n_partialSums);
hipMemcpyAsync(d_source, &source, sizeof(int), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraPrefix << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_isInQ, d_partialSums, d_Qcondition, d_threshold, d_deltas, g_odata, d_numOfThreads);
//hipDeviceSynchronize();
hipMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), hipMemcpyDeviceToHost, *stream);
/*hipDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//hipMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_source);
hipFree(d_nodeW);
hipFree(d_nodeParent);
hipFree(d_F);
hipFree(d_headF);
hipFree(d_U);
hipFree(d_threshold);
hipFree(g_odata);
hipFree(d_numOfThreads);
hipFree(d_isInQ);
hipFree(d_Qcondition);
hipFree(d_partialSums);
}
void Johnson1(int* outW, int* edgeIndex, int* edges, int* costs, int nodeSize, int edgeSize){
int source = nodeSize;
edgeSize += nodeSize;
nodeSize++;
int head1 = 0;
int head2 = 0;
int mutex = 0;
int* d_nodeW;
int* d_nodeParent;
int* F1 = 0;
int* F2 = 0;
int * d_head1 = 0;
int * d_head2 = 0;
int* d_itNo = 0;
int* d_source = 0;
int* d_mutex = 0;
int *d_numOfThreads = 0;
hipMalloc((void**)&F1, sizeof(int) * nodeSize);
hipMalloc((void**)&F2, sizeof(int) * nodeSize);
hipMalloc((void**)&d_itNo, sizeof(int) * nodeSize);
hipMalloc((void**)&d_head1, sizeof(int));
hipMalloc((void**)&d_head2, sizeof(int));
hipMalloc((void**)&d_source, sizeof(int));
hipMalloc((void**)&d_mutex, sizeof(int));
hipMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
int* d_edgeIndex, *d_edges, *d_costs;
hipMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
hipMalloc((void**)&d_edges, sizeof(int) * edgeSize);
hipMalloc((void**)&d_costs, sizeof(int) * edgeSize);
hipMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
hipMemcpy(d_costs, costs, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
hipMemcpy(d_head1, &head1, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_head2, &head2, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_source, &source, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_mutex, &mutex, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice);
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
BF << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_itNo, d_source, F1, F2, d_head1, d_head2, d_mutex, d_numOfThreads);
hipMemcpy(costs, d_costs, edgeSize*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(outW, d_nodeW, (nodeSize - 1)*sizeof(int), hipMemcpyDeviceToHost); // copy only the original vertices; outW has no slot for the added virtual source
//for (int i = 0; i < nodeSize; i++)
// cout << outW[i]<<" " ;
//cout << endl << endl;
//for (int i = 0; i < edgeSize ; i++)
// cout <<costs[i]<<" ";
//cout << endl << endl;
}
Graph addZeroEdge(Graph g) {
// add a zero-cost edge from vertex 0 to all other vertices
for (int i = 1; i < g.size(); i++) {
g[0].push_back({ i, 0 });
}
return g;
}
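// bellmanford: single-source shortest paths from vertex 0 (after addZeroEdge),
// tolerant of negative edge weights. The resulting distances h(v) are the
// potentials for Johnson's reweighting, w'(u,v) = w(u,v) + h(u) - h(v), which
// is non-negative for every edge, so Dijkstra can be run on the reweighted graph.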
SingleSP bellmanford(Graph &g, int s) {
vector<vector<int>> memo(1, vector<int>(g.size(), INF));
// initialise base case
memo[0][s] = 0;
for (int i = 1; i < g.size(); i++) { // V-1 relaxation passes over the single rolling row
// compute shortest paths from s to all vertices, with max hop-count i
for (int n = 0; n < g.size(); n++) {
/* if (memo[0][n] < memo[0][n]) {
memo[0][n] = memo[0][n];
}*/
for (auto& e : g[n]) {
if (memo[0][n] != INF) {
if (memo[0][n] + e.cost < memo[0][e.head]) {
memo[0][e.head] = memo[0][n] + e.cost;
}
}
}
}
}
// check if the last iteration differed from the 2nd-last
/*for (int j = 0; j < g.size(); j++) {
if (memo[g.size() + 1][j] != memo[g.size()][j]) {
throw string{ "negative cycle found" };
}
}*/
return memo[0];
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
// CPU - GPU verification
int main1()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
string graphName = "graph1.txt" ;
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
//string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
//string graphName = "graph8.txt";
//string graphName = "graph9.txt";
//std::string graphName = "50k_1m.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
hipHostMalloc((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
hipHostMalloc((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
hipHostMalloc((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
f >> cost;
//cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
//GPUDijkstraQ(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
vector<int*> allWeights;
//for (int w = 0; w < nodeSize; w++)
//{
// int* amca = new int[nodeSize + 1];
// allWeights.push_back(amca);
//}
for (int w = 0; w < nodeSize; w++)
{
int* amca = new int[nodeSize + 1];
allWeights.push_back(amca);
}
int* d_edgeIndex, *d_edges, *d_costs;
hipMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
hipMalloc((void**)&d_edges, sizeof(int) * edgeSize);
hipMalloc((void**)&d_costs, sizeof(int) * edgeSize);
const int numOfStreams = 1;
hipStream_t streams[numOfStreams];
for (int i = 0; i < numOfStreams; i++)
{
hipStreamCreate(&streams[i]);
}
hipMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, allWeights[0], nodeParent);
//oneGPUDijkstra(edgeSize, nodeSize, 0, head1, head2, mutex, edgeIndex, edges, costs, allWeights);
//hipProfilerStart();
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
Johnson1(nodeW, edgeIndex, edges, costs, nodeSize, edgeSize);
hipMemcpy(d_costs, costs, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
int* d_deltas = 0;
int* d_numOfThreads = 0;
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
hipMemcpy(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
int threadsPerBlock = BS;
int numOfBlocks = nodeSize / threadsPerBlock + (nodeSize % threadsPerBlock == 0 ? 0 : 1);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(d_edgeIndex, d_edges, d_costs, d_deltas, d_numOfThreads);
hipDeviceSynchronize();
for (int n = 0; n < nodeSize; n++)
{
//hipDeviceSynchronize();
//std::cout << n << std::endl;
//oneGPUDijkstra(edgeSize, nodeSize, n, head1, head2, mutex, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraQVerify(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
oneGPUDijkstraPrefixVerify(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//std::cout << n << std::endl;
}
cout << "GPU done" << endl;
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
StartCounter();
Graph gprime = addZeroEdge(g);
SingleSP ssp;
try {
ssp = bellmanford(gprime, 0);
}
catch (string e) {
cout << "Negative cycles found in graph. Cannot compute shortest paths." << endl;
throw e;
}
for (int i = 1; i < g.size(); i++) {
for (auto &e : g[i]) {
e.cost = e.cost + ssp[i] - ssp[e.head];
}
}
AllSP allsp(g.size());
for (int i = 0; i < g.size(); i++) {
allsp[i] = djikstra(g, i);
}
cout << "CPU Time: " << GetCounter() << endl;
//cout << "GPU Matrix:" << endl;
//for (unsigned int i = 0; i < 1; i++){
// for (unsigned int j = 0; j < allWeights.size(); j++)
// cout << allWeights[i][j] << " ";
// cout << endl;
//}
/*
cout << "CPU Matrix:" << endl;
cout << endl << endl;
for (unsigned int i = 0; i < allsp.size(); i++){
for (unsigned int j = 0; j < allsp[i].size(); j++)
cout << allsp[i][j] << " ";
cout << endl;
}*/
int count = 0;
bool succes = true;
for (unsigned int i = 0; i < allWeights.size(); i++){
for (unsigned int j = 0; j < allWeights.size(); j++){
//cout << allsp[i][j] << " " << allWeights[i][j] << endl;
if (allsp[i][j] != allWeights[i][j]){
succes = false;
count++;
//cout << i << endl;
//cout << "***************************" << endl;
}
}
}
if (succes)
std::cout << "successful" << std::endl;
else
std::cout << "fail" << std::endl;
if (count)
cout << count<< endl;
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
hipHostFree(edgeIndex);
hipHostFree(edges);
hipHostFree(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
//! gpu performance
int main()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
//string graphName = "graph1.txt";
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
//string graphName = "graph8.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
hipHostMalloc((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
hipHostMalloc((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
hipHostMalloc((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
f >> cost;
//cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
vector<int*> allWeights;
for (int w = 0; w < 1; w++)
{
int* amca = new int[nodeSize + 1];
allWeights.push_back(amca);
}
int* d_edgeIndex, *d_edges, *d_costs;
hipMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
hipMalloc((void**)&d_edges, sizeof(int) * edgeSize);
hipMalloc((void**)&d_costs, sizeof(int) * edgeSize);
const int numOfStreams = 1;
hipStream_t streams[numOfStreams];
for (int i = 0; i < numOfStreams; i++)
{
hipStreamCreate(&streams[i]);
}
hipMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
//hipProfilerStart();
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
Johnson1(nodeW, edgeIndex, edges, costs, nodeSize, edgeSize);
hipMemcpy(d_costs, costs, sizeof(int) * (edgeSize), hipMemcpyHostToDevice);
int* d_deltas = 0;
int* d_numOfThreads = 0;
hipMalloc((void**)&d_numOfThreads, sizeof(int));
hipMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
hipMemcpy(d_numOfThreads, &nodeSize, sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
int threadsPerBlock = BS;
int numOfBlocks = nodeSize / threadsPerBlock + (nodeSize % threadsPerBlock == 0 ? 0 : 1);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(d_edgeIndex, d_edges, d_costs, d_deltas, d_numOfThreads);
hipDeviceSynchronize();
for (int n = 0; n <nodeSize; n++)
{
oneGPUDijkstra(edgeSize, nodeSize, n, head1, head2, mutex, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraQ(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraPrefix(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
}
cout << "done" << endl;
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
hipHostFree(edgeIndex);
hipHostFree(edges);
hipHostFree(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
//! cpu performance
int main2()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
//string graphName = "graph1.txt";
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
//string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
string graphName = "graph8.txt";
//std::string graphName = "50k_1m.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
hipHostMalloc((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
hipHostMalloc((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
hipHostMalloc((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
//f >> cost;
cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
StartCounter();
Graph gprime = addZeroEdge(g);
SingleSP ssp;
try {
ssp = bellmanford(gprime, 0);
}
catch (string e) {
cout << "Negative cycles found in graph. Cannot compute shortest paths." << endl;
throw e;
}
for (int i = 1; i < g.size(); i++) {
for (auto &e : g[i]) {
e.cost = e.cost + ssp[i] - ssp[e.head];
}
}
AllSP allsp(1);
for (int i = 0; i < g.size(); i++) {
allsp[0] = djikstra(g, i);
}
cout << "CPU Time: " << GetCounter() << endl;
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
hipHostFree(edgeIndex);
hipHostFree(edges);
hipHostFree(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
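//! prescan: single-block Blelloch exclusive scan in shared memory
//! (up-sweep to build partial sums, then down-sweep to distribute them).
//! Assumes *n is a power of two, blockDim.x == *n / 2, and the shared
//! allocation is supplied at launch time.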
__global__ void prescan(float *g_odata, float *g_idata, int *n)
{
int thid = threadIdx.x;
int offset = 1;
extern __shared__ float temp[]; // allocated on invocation
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
for (int d = *n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[*n - 1] = 0; } // clear the last element
for (int d1 = 1; d1 < *n; d1 *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d1)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
}
int makeItPowerOf2(int size){
int powerOfTwo = 1;
while (size > powerOfTwo){
powerOfTwo *= 2;
}
return powerOfTwo;
}
//! prefix sum test
int main4(int argc, char *argv[])
{
// Initialization. The shuffle intrinsic is not available on SM < 3.0
// so waive the test if the hardware is not present.
// int cuda_device = 0;
printf("Starting shfl_scan\n");
//// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
//hipDeviceProp_t deviceProp;
//checkCudaErrors(hipGetDevice(&cuda_device));
//checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
//printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
// deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
//// __shfl intrinsic needs SM 3.0 or higher
//if (deviceProp.major < 3)
//{
// printf("> __shfl() intrinsic requires device SM 3.0+\n");
// printf("> Waiving test.\n");
// exit(EXIT_WAIVED);
//}
bool bTestResult = shuffle_simple_test(argc, argv);
// bool intTest = shuffle_integral_image_test();
// bTestResult &= intTest;
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
getchar();
hipDeviceReset();
exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
94abe6c63ce2e0221bc948223d457b04c6f51db9.cu
|
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <limits>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <set>
#include <utility>
#include <cfloat>
#include <time.h>
#include <windows.h>
#include <string>
#include "helper_functions.h"
#include "helper_cuda.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL)
{
extern __shared__ int sums[];
int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
int lane_id = id % warpSize;
// determine a warp_id within a block
int warp_id = threadIdx.x / warpSize;
// Below is the basic structure of using a shfl instruction
// for a scan.
// Record "value" as a variable - we accumulate it along the way
int value = data[id];
// Now accumulate in log steps up the chain
// compute sums, with another thread's value who is
// distance delta away (i). Note
// those threads where the thread 'i' away would have
// been out of bounds of the warp are unaffected. This
// creates the scan sum.
#pragma unroll
for (int i = 1; i <= width; i *= 2)
{
int n = __shfl_up(value, i, width);
if (lane_id >= i) value += n;
}
// value now holds the scan value for the individual thread
// next sum the largest values for each warp
// write the sum of the warp to smem
if (threadIdx.x % warpSize == warpSize - 1)
{
sums[warp_id] = value;
}
__syncthreads();
//
// scan sum the warp sums
// the same shfl scan operation, but performed on warp sums
//
if (warp_id == 0 && lane_id < (blockDim.x / warpSize))
{
int warp_sum = sums[lane_id];
for (int i = 1; i <= width; i *= 2)
{
int n = __shfl_up(warp_sum, i, width);
if (lane_id >= i) warp_sum += n;
}
sums[lane_id] = warp_sum;
}
__syncthreads();
// perform a uniform add across warps in the block
// read neighbouring warp's sum and add it to threads value
int blockSum = 0;
if (warp_id > 0)
{
blockSum = sums[warp_id - 1];
}
value += blockSum;
// Now write out our result
data[id] = value;
// last thread has the sum; write out the block's sum
if (partial_sums != NULL && threadIdx.x == blockDim.x - 1)
{
partial_sums[blockIdx.x] = value;
}
}
// Uniform add: add partial sums array
__global__ void uniform_add(int *data, int *partial_sums, int len)
{
__shared__ int buf;
int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
if (id > len) return;
if (threadIdx.x == 0)
{
buf = partial_sums[blockIdx.x];
}
__syncthreads();
data[id] += buf;
}
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor)
{
return ((dividend % divisor) == 0) ?
(dividend / divisor) :
(dividend / divisor + 1);
}
bool CPUverify(int *h_data, int *h_result, int n_elements)
{
// cpu verify
for (int i = 0; i<n_elements - 1; i++)
{
h_data[i + 1] = h_data[i] + h_data[i + 1];
}
int diff = 0;
for (int i = 0; i<n_elements; i++)
{
diff += h_data[i] - h_result[i];
}
printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
bool bTestResult = false;
if (diff == 0) bTestResult = true;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int j = 0; j<100; j++)
for (int i = 0; i<n_elements - 1; i++)
{
h_data[i + 1] = h_data[i] + h_data[i + 1];
}
sdkStopTimer(&hTimer);
double cput = sdkGetTimerValue(&hTimer);
printf("CPU sum (naive) took %f ms\n", cput / 100);
return bTestResult;
}
bool shuffle_simple_test(int argc, char **argv)
{
int *h_data, *h_partial_sums, *h_result;
int *d_data, *d_partial_sums;
const int n_elements = 65036;
int sz = sizeof(int)*n_elements;
//int cuda_device = 0;
//printf("Starting shfl_scan\n");
//// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
//cudaDeviceProp deviceProp;
//checkCudaErrors(cudaGetDevice(&cuda_device));
//checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
//printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
// deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
//// __shfl intrinsic needs SM 3.0 or higher
//if (deviceProp.major < 3)
//{
// printf("> __shfl() intrinsic requires device SM 3.0+\n");
// printf("> Waiving test.\n");
// exit(EXIT_WAIVED);
//}
checkCudaErrors(cudaMallocHost((void **)&h_data, sizeof(int)*n_elements));
checkCudaErrors(cudaMallocHost((void **)&h_result, sizeof(int)*n_elements));
//initialize data:
printf("Computing Simple Sum test\n");
printf("---------------------------------------------------\n");
printf("Initialize test data [1, 1, 1...]\n");
for (int i = 0; i<n_elements; i++)
{
h_data[i] = 1;
}
int blockSize = 256;
int gridSize = n_elements / blockSize + 1;
int nWarps = blockSize / 32;
int shmem_sz = nWarps * sizeof(int);
int n_partialSums = n_elements / blockSize + 1;
int partial_sz = n_partialSums*sizeof(int);
printf("Scan summation for %d elements, %d partial sums\n",
n_elements, n_elements / blockSize);
int p_blockSize = std::min(n_partialSums, blockSize);
int p_gridSize = iDivUp(n_partialSums, p_blockSize);
printf("Partial summing %d elements with %d blocks of size %d\n",
n_partialSums, p_gridSize, p_blockSize);
// initialize a timer
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
float et = 0;
float inc = 0;
checkCudaErrors(cudaMalloc((void **)&d_data, sz));
checkCudaErrors(cudaMalloc((void **)&d_partial_sums, partial_sz));
checkCudaErrors(cudaMemset(d_partial_sums, 0, partial_sz));
checkCudaErrors(cudaMallocHost((void **)&h_partial_sums, partial_sz));
checkCudaErrors(cudaMemcpy(d_data, h_data, sz, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start, 0));
shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(d_data, 32, d_partial_sums);
shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(d_partial_sums, 32);
uniform_add << <gridSize - 1, blockSize >> >(d_data + blockSize, d_partial_sums, n_elements);
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&inc, start, stop));
et += inc;
checkCudaErrors(cudaMemcpy(h_result, d_data, sz, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_partial_sums, d_partial_sums, partial_sz,
cudaMemcpyDeviceToHost));
printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
printf("Time (ms): %f\n", et);
printf("%d elements scanned in %f ms -> %f MegaElements/s\n",
n_elements, et, n_elements / (et / 1000.0f) / 1000000.0f);
bool bTestResult = CPUverify(h_data, h_result, n_elements);
checkCudaErrors(cudaFreeHost(h_data));
checkCudaErrors(cudaFreeHost(h_result));
checkCudaErrors(cudaFreeHost(h_partial_sums));
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_partial_sums));
return bTestResult;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
using namespace std;
#define INF 1000000000
#define BS 512
#define BEGIN_ATOMIC bool isSet = false; do { if (isSet = atomicCAS(mutex, 0, 1) == 0) {
#define END_ATOMIC }if (isSet){*mutex = 0;}} while (!isSet);
#define GET_THREAD_ID (blockIdx.x * blockDim.x + threadIdx.x);
#include "cuda_profiler_api.h"
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
//if (abort) exit(code);
}
}
//! used for calculating time in CPU
double PCFreq = 0.0;
__int64 CounterStart = 0;
float minTime;
float maxTime;
//! starts the counter (for CPU)
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li))
std::cout << "QueryPerformanceFrequency failed!\n";
PCFreq = double(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
//! gives the elapse time from the call of StartCounter()
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
__device__ volatile int sem = 0;
__device__ void acquire_semaphore(volatile int *lock){
while (atomicCAS((int *)lock, 0, 1) != 0);
}
__device__ void release_semaphore(volatile int *lock){
*lock = 0;
__threadfence();
}
/**********************************************************************************************************************************/
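// reweightKernel: applies Johnson's reweighting edge by edge,
// w'(u,v) = w(u,v) + h(u) - h(v), where h holds the Bellman-Ford distances.
// After this pass every cost in d_out_costs is non-negative, so the
// Dijkstra kernels below can be used for all-pairs shortest paths.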
__global__ void reweightKernel(int* bfWeights, int* d_edgeIndex, int* d_edges, int* in_costs, int* d_out_costs, int* numOfThreads)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
int edgeStart = d_edgeIndex[i];
int edgeEnd = d_edgeIndex[i + 1];
int u = bfWeights[i];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = d_edges[m]; // neighbor
int w = in_costs[m]; // its cost
int v = bfWeights[adj];
d_out_costs[m] = w + u - v;
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void spawnVertices(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* itNo, int* source,
int* F1, int* F2, int *head1, int* head2, int currIt, int* mutex)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *head1;
__syncthreads();
if (i < s_data[0])
{
int nodeIndex = F1[i];
int edgeStart = edgeIndex[nodeIndex];
int edgeEnd = edgeIndex[nodeIndex + 1];
for (int e = edgeStart; e < edgeEnd; e++)
{
int adj = edges[e];
//printf("%d\n", adj);
int newCost = nodeW[nodeIndex] + costs[e];
int outDegree = edgeIndex[adj + 1] - edgeIndex[adj];
if (nodeIndex == adj)
continue;
BEGIN_ATOMIC
if (newCost < nodeW[adj])
{
nodeW[adj] = newCost;
nodeParent[adj] = nodeIndex;
if (itNo[adj] != currIt && outDegree > 0){
//printf(" %d -- %d\n", adj, nodeIndex);
*(F2 + *head2) = adj;
*head2 += 1;
itNo[adj] = currIt;
}
}
END_ATOMIC
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
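// BF: frontier-based Bellman-Ford driven entirely from the GPU. Every thread
// initialises its own vertex, thread 0 seeds the frontier F1 with the source
// and then repeatedly launches spawnVertices (dynamic parallelism) to relax
// the edges leaving the current frontier, swapping F1/F2 until the frontier
// empties. Finally thread 0 launches reweightKernel to rewrite the costs in place.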
__global__ void BF(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* itNo, int* source,
int* F1, int* F2, int *head1, int* head2, int* mutex, int* n)
{
unsigned int i = GET_THREAD_ID
__shared__ int shared_amca[1];
int* s_data = shared_amca;
if (threadIdx.x == 0)
s_data[0] = *n;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s){
nodeW[i] = 0;
nodeParent[i] = -2;
}
else{
nodeW[i] = INF;
nodeParent[i] = -1;
}
itNo[i] = -1;
if (i == 0){
*(F1 + *head1) = s;
*head1 = 1;
}
__syncthreads();
if (i == 0){
int ss = 0;
while (true){
int h1 = *head1;
if (h1 == 0)
break;
int numOfThreads = BS;
int numOfBlocks = *head1 / numOfThreads + (*head1%numOfThreads == 0 ? 0 : 1);
//for (int q = 0; q < h1; q++)
//printf("%d ", F1[q]);
//printf("\n\n");
spawnVertices << <numOfBlocks, numOfThreads >> >(edgeIndex, edges, costs, nodeW, nodeParent, itNo, source, F1, F2, head1, head2, ss, mutex);
cudaDeviceSynchronize();
int *temp = F1;
F1 = F2;
F2 = temp;
*head1 = *head2;
*head2 = 0;
ss++;
}
}
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = 512;
int numOfBlocks = s_data[0] / threadsPerBlock + (s_data[0] % threadsPerBlock == 0 ? 0 : 1);
reweightKernel << <numOfBlocks, threadsPerBlock >> > (nodeW, edgeIndex, edges, costs, costs, n);
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
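// relaxKernel: for every frontier vertex (F[i] == 1) relax all outgoing edges
// whose head is still unsettled (U[adj] == 1) using atomicMin on the tentative
// distance, so concurrent updates to the same vertex are safe.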
__global__ void relaxKernel(int* edgeIndex, int* edges, int*costs, int* nodeW, int* nodeParent, int* F, int* U, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
if (F[i] == 1)
{
int edgeStart = edgeIndex[i];
int edgeEnd = edgeIndex[i + 1];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = edges[m]; // neighbor
int cost = costs[m]; // its cost
if (U[adj] == 1)
{
//nodeParent[adj] = i;
/* TODO : replace this with a cleaner atomic update */
//BEGIN_ATOMIC
// get the minimum value for relaxing
atomicMin(nodeW + adj, nodeW[i] + cost);
//nodeW[adj] = nodeW[adj] < (nodeW[i] + cost) ? nodeW[adj] : (nodeW[i] + cost);
//END_ATOMIC
}
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
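// updateKernel: settling step. Every unsettled vertex whose tentative distance
// is <= the current threshold leaves U and enters the new frontier F. Because
// the threshold is the minimum over unsettled i of nodeW[i] + delta[i] (the
// cheapest possible extension), those vertices can no longer be improved.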
__global__ void updateKernel(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* U, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
F[i] = 0;
if (U[i] == 1 && nodeW[i] <= *threshold)
{
F[i] = 1;
U[i] = 0;
//printf(" %d\n", i);
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void updateKernelQ(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* headF, int* U, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
if (i == 0)
{
*headF = 0;
}
__syncthreads();
if (U[i] == 1 && nodeW[i] <= *threshold)
{
U[i] = 0;
// atomicAdd(headF, 1);
atomicExch(F + atomicAdd(headF, 1), i);
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
__global__ void relaxKernelQ(int* edgeIndex, int* edges, int*costs, int* nodeW, int* nodeParent, int* F, int* headF, int* U)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *headF;
__syncthreads();
if (i < s_data[0])
{
int nodeIdx = F[i];
int edgeStart = edgeIndex[nodeIdx];
int edgeEnd = edgeIndex[nodeIdx + 1];
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int adj = edges[m]; // neighbor
int cost = costs[m]; // its cost
if (U[adj] == 1)
{
/* TODO : replace this with a cleaner atomic update */
// BEGIN_ATOMIC
// get the minimum value for relaxing
nodeParent[adj] = nodeIdx;
//nodeW[adj] = nodeW[adj] < (nodeW[nodeIdx] + cost) ? nodeW[adj] : (nodeW[nodeIdx] + cost);
//END_ATOMIC
atomicMin(nodeW + adj, nodeW[nodeIdx] + cost);
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
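// computeDeltaUKernel: deltas[i] = cost of the cheapest edge leaving vertex i
// (INF for vertices with no outgoing edge); used when forming the settling threshold.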
__global__ void computeDeltaUKernel(int *edgeIndex, int *edges, int * costs, int* deltas, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
int edgeStart = edgeIndex[i];
int edgeEnd = edgeIndex[i + 1];
int minVal = INF;
// for all successors of node i
for (int m = edgeStart; m < edgeEnd; m++)
{
int cost = costs[m]; // its cost
minVal = minVal < cost ? minVal : cost;
}
deltas[i] = minVal;
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
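// reduce3: block-wise min-reduction of g_idata into g_odata (one value per
// block); the thread with global id 0 then serially folds the n2 block results
// into g_odata[0]. The Dijkstra kernels call it with the threshold buffer as
// the output, so element 0 ends up holding the global minimum.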
__global__ void
reduce3(int *g_idata, int *g_odata, unsigned int n, unsigned int n2)
{
extern __shared__ int s_type[];
int *sdata = s_type;
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
int myMin = (i < n) ? g_idata[i] : INF;
if (i + blockDim.x < n)
{
int tempMin = g_idata[i + blockDim.x];
myMin = myMin < tempMin ? myMin : tempMin;
}
sdata[tid] = myMin;
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1)
{
if (tid < s)
{
int temp = sdata[tid + s];
sdata[tid] = myMin = myMin < temp ? myMin : temp;
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
__syncthreads();
// final serial pass: global thread 0 folds the per-block minima into g_odata[0]
if (blockIdx.x * blockDim.x + threadIdx.x == 0){
int minnak = g_odata[0];
for (int j = 1; j < n2; j++)
if (minnak > g_odata[j])
minnak = g_odata[j];
g_odata[0] = minnak;
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
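// minimumKernel: per-block minimum of nodeW[i] + deltas[i] over unsettled
// vertices; settled, out-of-range, or sink lanes contribute INF. The per-block
// results in g_odata are collapsed to a single threshold by reduce3.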
__global__ void minimumKernel(int *edgeIndex, int *edges, int * costs, int* deltas, int* U, int* nodeW, int* g_odata, int* numOfThreads)
{
unsigned int i = GET_THREAD_ID
unsigned int tid = threadIdx.x;
extern __shared__ int amca[];
int * s_data = amca;
if (i < *numOfThreads)
{
if (U[i] == 1)
{
if (deltas[i] == INF)
s_data[tid] = INF;
else
s_data[tid] = nodeW[i] + deltas[i];
}
else
{
s_data[tid] = INF;
}
}
else
{
s_data[tid] = INF;
}
__syncthreads();
// Reduce2 Cuda SDK
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
//printf("amca : %d\n", blockDim.x);
if (s_data[tid] > s_data[tid + s])
{
s_data[tid] = s_data[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
{
g_odata[blockIdx.x] = s_data[0];
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
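// Stream-compaction variant of the update step: updateKernelPrefix clears U
// for the vertices to be settled and writes 0/1 flags into isInQ/Qcondition;
// shfl_scan_test + uniform_add turn isInQ into an inclusive prefix sum, and
// fillQPrefix scatters each flagged vertex to F[prefixSum[i] - 1], with
// headF set to the total count.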
__global__ void fillQPrefix(int* F, int* Qcondition, int* prefixSum, int n_elements,int *headF)
{
unsigned int i = GET_THREAD_ID;
if (i < n_elements)
{
if (i == 0)
{
*headF = prefixSum[n_elements - 1];
}
if (Qcondition[i] == 1)
{
F[(prefixSum[i] - 1)] = i;
}
}
}
__global__ void updateKernelPrefix(int* edgeIndex, int* edges, int*costs, int* nodeW, int* F, int* headF, int* U, int* isInQ, int* partialSums, int* Qcondition, int* threshold, int* numOfThreads)
{
__shared__ int shared[1];
int nData = *numOfThreads;
unsigned int i = GET_THREAD_ID;
int* s_data = shared;
if (threadIdx.x == 0)
s_data[0] = nData;
__syncthreads();
bool cond_isInQ;
if (i < s_data[0])
{
cond_isInQ = (U[i] == 1) && (nodeW[i] <= *threshold);
if (cond_isInQ)
{
U[i] = 0;
isInQ[i] = 1;
Qcondition[i] = 1;
}
else
{
isInQ[i] = 0;
Qcondition[i] = 0;
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
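// Dijkstra: one launch per source vertex. Each thread initialises its own
// entry (the source gets distance 0 and enters the frontier), then thread 0
// drives the relax -> minimum -> reduce -> update cycle with nested kernel
// launches until the threshold stays at INF, i.e. every reachable vertex is settled.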
__global__ void Dijkstra(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* U, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
F[i] = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
F[i] = 0;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
//threshold = INF;
while (true)
{
*threshold = INF;
relaxKernel << < numOfBlocks, threadsPerBlock, 0, s >> > (edgeIndex, edges, costs, nodeW, nodeParent, F, U, numOfThreads);
minimumKernel << < numOfBlocks, threadsPerBlock, 4096, s >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 1024, s >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
updateKernel << < numOfBlocks, threadsPerBlock, 0, s >> >(edgeIndex, edges, costs, nodeW, F, U, threshold, numOfThreads);
cudaDeviceSynchronize();
//printf("threshold = %f \n", *threshold);
if (*threshold == INF)
{
break;
}
//printf("\n*************************************************************************\n");
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
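// DijkstraQ: same loop as Dijkstra above, but the frontier is kept as a compact
// queue F of headF entries (appended with atomics in updateKernelQ) instead of
// a 0/1 flag array, so relaxKernelQ only visits queued vertices.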
__global__ void DijkstraQ(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* headF, int* U, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
*headF = 0;
F[*headF] = i;
*headF = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
//printf("numOfBlocks: %d \n", numOfBlocks);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, deltas, numOfThreads);
while (true)
{
*threshold = INF;
int threadsPerBlockQ = threadsPerBlock;
int numOfBlocksQ = *headF / threadsPerBlockQ + (*headF % threadsPerBlockQ == 0 ? 0 : 1);
relaxKernelQ << < numOfBlocksQ, threadsPerBlockQ >> >(edgeIndex, edges, costs, nodeW, nodeParent, F, headF, U);
minimumKernel << < numOfBlocks, threadsPerBlock, 16536 >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 4096 >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
updateKernelQ << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, nodeW, F, headF, U, threshold, numOfThreads);
cudaDeviceSynchronize();
if (*threshold == INF)
{
break;
}
}
}
}
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
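// DijkstraPrefix: same relax/threshold loop as DijkstraQ, but the queue is
// rebuilt each round by stream compaction (updateKernelPrefix + scan +
// fillQPrefix) rather than atomic appends, giving a deterministic queue order.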
__global__ void DijkstraPrefix(int *edgeIndex, int *edges, int * costs,
int* nodeW, int* nodeParent, int* source, int* F, int* headF, int* U, int* isInQ, int* partialSums, int* Qcondition, int* threshold, int* deltas, int* g_odata, int* numOfThreads)
{
__shared__ int shared[1];
int* s_data = shared;
unsigned int i = GET_THREAD_ID;
/*if (i == 0)
{
printf("%d\n", *source);
}*/
if (threadIdx.x == 0)
s_data[0] = *numOfThreads;
__syncthreads();
if (i < s_data[0])
{
unsigned int s = *source;
//! initialize
if (i == s)
{
nodeW[i] = 0;
nodeParent[i] = -2;
U[i] = 0; // control
*headF = 0;
F[*headF] = i;
*headF = 1;
}
else
{
nodeW[i] = INF;
nodeParent[i] = -1;
U[i] = 1;
}
__syncthreads();
if (i == 0)
{
int threadsPerBlock = BS;
int numOfBlocks = *numOfThreads / threadsPerBlock + (*numOfThreads % threadsPerBlock == 0 ? 0 : 1);
//printf("numOfBlocks: %d \n", numOfBlocks);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, deltas, numOfThreads);
int n_elements = *numOfThreads;
int blockSize = BS;
int gridSize = n_elements / blockSize + ((n_elements % blockSize) == 0 ? 0 : 1);
int nWarps = blockSize / 32;
int shmem_sz = nWarps * sizeof(int);
int n_partialSums = gridSize;
int partial_sz = n_partialSums*sizeof(int);
int p_blockSize = (n_partialSums < blockSize) ? n_partialSums : blockSize;
int p_gridSize = ((n_partialSums % p_blockSize) == 0) ?
(n_partialSums / p_blockSize) :
(n_partialSums / p_blockSize + 1); //iDivUp(n_partialSums, p_blockSize);
while (true)
{
*threshold = INF;
int threadsPerBlockQ = threadsPerBlock;
int numOfBlocksQ = *headF / threadsPerBlockQ + (*headF % threadsPerBlockQ == 0 ? 0 : 1);
relaxKernelQ << < numOfBlocksQ, threadsPerBlockQ >> >(edgeIndex, edges, costs, nodeW, nodeParent, F, headF, U);
minimumKernel << < numOfBlocks, threadsPerBlock, 16536 >> > (edgeIndex, edges, costs, deltas, U, nodeW, g_odata, numOfThreads);
int reduceTPB = 32;
int numOfBlocks2 = numOfBlocks / reduceTPB + (numOfBlocks % reduceTPB == 0 ? 0 : 1);
reduce3 << <numOfBlocks2, reduceTPB, 4096 >> >(g_odata, threshold, numOfBlocks, numOfBlocks2);
cudaDeviceSynchronize();
updateKernelPrefix << < numOfBlocks, threadsPerBlock >> >(edgeIndex, edges, costs, nodeW, F, headF, U, isInQ, partialSums, Qcondition, threshold, numOfThreads);
shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(isInQ, 32, partialSums);
shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(partialSums, 32);
uniform_add << <gridSize - 1, blockSize >> >(isInQ + blockSize, partialSums, n_elements);
fillQPrefix << <gridSize, blockSize >> >(F, Qcondition, isInQ, n_elements,headF);
cudaDeviceSynchronize();
if (*threshold == INF)
{
//printf("%d %d\n", *headF, *threshold);
break;
}
}
}
}
}
__global__ void cudaWarmup()
{
int i = GET_THREAD_ID
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
struct Edge {
int head;
int cost;
};
using Graph = std::vector<std::vector<Edge>>;
using SingleSP = vector<int>;
using AllSP = vector<vector<int>>;
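// CPU reference: Dijkstra with a std::set used as a priority queue; the
// verification and CPU-timing paths run it on the reweighted graph from each
// source and compare the result against the GPU distance matrix.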
SingleSP djikstra(const Graph& g, int s) {
SingleSP dist(g.size(), INF);
set<pair<int, int>> frontier;
frontier.insert({ 0, s });
while (!frontier.empty()) {
pair<int, int> p = *frontier.begin();
frontier.erase(frontier.begin());
int d = p.first;
int n = p.second;
// this is our shortest path to n
dist[n] = d;
// now look at all edges out from n to update the frontier
for (auto e : g[n]) {
// update this node in the frontier if we have a shorter path
if (dist[n] + e.cost < dist[e.head]) {
if (dist[e.head] != INF) {
// we've seen this node before, so erase it from the set in order to update it
frontier.erase(frontier.find({ dist[e.head], e.head }));
}
frontier.insert({ dist[n] + e.cost, e.head });
dist[e.head] = dist[n] + e.cost;
}
}
}
return dist;
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void GPUDijkstraQ(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* nodeW, int* nodeParent){
int * d_nodeW = 0;
int* d_nodeParent = 0;
int * d_headF = 0;
int * d_head2 = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* d_deltas = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
int numOfThreads = 1024;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_headF, sizeof(int));
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_threshold, 512 * sizeof(int));
cudaMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMemcpy(d_source, &source, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice);
/* TEST DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time with Q: %lf ms\n", elapsedTime);
cudaMemcpy(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "**************************************" << std::endl;
for (int i = 0; i < 5; i++){
int next = nodeParent[i];
if (next == -1){
std::cout << "unreachable" << std::endl;
continue;
}
std::cout << i << " ";
while (next != -2){
std::cout << next << " ";
next = nodeParent[next];
}
std::cout << " ----> " << nodeW[i];
std::cout << std::endl;
}
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_headF);
cudaFree(d_F);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(d_deltas);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void GPUDijkstra(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* nodeW, int* nodeParent){
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* d_deltas = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMemcpy(d_source, &source, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice);
/* RUN DIJKSTRA*/
Dijkstra << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
cudaMemcpy(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(d_deltas);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstra(int edgeSize, int nodeSize, int source, int head1, int head2, int mutex, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, cudaStream_t* stream){
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMemcpyAsync(d_source, &source, sizeof(int), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA */
Dijkstra << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
cudaMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost, *stream);
//cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraQ(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, cudaStream_t* stream){
//int* nodeW = allWeights[source];
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_headF, sizeof(int));
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMemcpyAsync(d_source, &source, sizeof(int), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
//cudaDeviceSynchronize();
cudaMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost, *stream);
/*cudaDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_headF);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraQVerify(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, cudaStream_t* stream){
int* nodeW = allWeights[source];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_headF, sizeof(int));
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMemcpyAsync(d_source, &source, sizeof(int), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraQ << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_threshold, d_deltas, g_odata, d_numOfThreads);
//cudaDeviceSynchronize();
cudaMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost, *stream);
/*cudaDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(g_odata);
cudaFree(d_headF);
cudaFree(d_numOfThreads);
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
void oneGPUDijkstraPrefix(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, cudaStream_t* stream){
//int* nodeW = allWeights[source];
int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int* d_isInQ = 0;
int* d_partialSums = 0;
int* d_Qcondition = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_headF, sizeof(int));
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMalloc((void**)&d_Qcondition, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_isInQ, sizeof(int) * nodeSize);
int n_partialSums = nodeSize / BS + (nodeSize % BS == 0 ? 0 : 1);
cudaMalloc((void**)&d_partialSums, sizeof(int) * n_partialSums);
cudaMemcpyAsync(d_source, &source, sizeof(int), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraPrefix << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_isInQ, d_partialSums, d_Qcondition, d_threshold, d_deltas, g_odata, d_numOfThreads);
cudaMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost, *stream);
/*cudaDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_headF);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
cudaFree(d_isInQ);
cudaFree(d_Qcondition);
cudaFree(d_partialSums);
}
void oneGPUDijkstraPrefixVerify(int edgeSize, int nodeSize, int source, int* d_edgeIndex, int* d_edges, int* d_costs, int* d_deltas, vector<int*>& allWeights, cudaStream_t* stream){
int* nodeW = allWeights[source];
//int* nodeW = allWeights[0];
int* nodeParent = 0;
int * d_nodeW = 0;
int* d_nodeParent = 0;
int* d_source = 0;
int* d_F = 0; // Frontier set
int* d_headF = 0;
int* d_U = 0; // Unsettled set
int* d_threshold = 0;
int* g_odata = 0;
int* d_numOfThreads = 0;
int* d_isInQ = 0;
int* d_partialSums = 0;
int* d_Qcondition = 0;
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_F, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_U, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_headF, sizeof(int));
cudaMalloc((void**)&d_threshold, sizeof(int));
cudaMalloc((void**)&g_odata, sizeof(int) * 1024/* blocksize max 1024*/);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMalloc((void**)&d_Qcondition, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_isInQ, sizeof(int) * nodeSize);
int n_partialSums = nodeSize / BS + (nodeSize % BS == 0 ? 0 : 1);
cudaMalloc((void**)&d_partialSums, sizeof(int) * n_partialSums);
cudaMemcpyAsync(d_source, &source, sizeof(int), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice, *stream);
/* RUN DIJKSTRA WITH QUEUE */
DijkstraPrefix << <numOfBlocks, numOfThreads, 0, *stream >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_source, d_F, d_headF, d_U, d_isInQ, d_partialSums, d_Qcondition, d_threshold, d_deltas, g_odata, d_numOfThreads);
//cudaDeviceSynchronize();
cudaMemcpyAsync(nodeW, d_nodeW, nodeSize*sizeof(int), cudaMemcpyDeviceToHost, *stream);
/*cudaDeviceSynchronize();
cout << source << endl;
for (int i = 0; i < nodeSize; i++)
cout << allWeights[source][i] << " ";
cout << endl;*/
//cudaMemcpy(nodeParent, d_nodeParent, nodeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_source);
cudaFree(d_nodeW);
cudaFree(d_nodeParent);
cudaFree(d_F);
cudaFree(d_U);
cudaFree(d_threshold);
cudaFree(g_odata);
cudaFree(d_numOfThreads);
cudaFree(d_headF);
cudaFree(d_isInQ);
cudaFree(d_Qcondition);
cudaFree(d_partialSums);
}
void Johnson1(int* outW, int* edgeIndex, int* edges, int* costs, int nodeSize, int edgeSize){
int source = nodeSize;
edgeSize += nodeSize;
nodeSize++;
int head1 = 0;
int head2 = 0;
int mutex = 0;
int* d_nodeW;
int* d_nodeParent;
int* F1 = 0;
int* F2 = 0;
int * d_head1 = 0;
int * d_head2 = 0;
int* d_itNo = 0;
int* d_source = 0;
int* d_mutex = 0;
int *d_numOfThreads = 0;
cudaMalloc((void**)&F1, sizeof(int) * nodeSize);
cudaMalloc((void**)&F2, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_itNo, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_head1, sizeof(int));
cudaMalloc((void**)&d_head2, sizeof(int));
cudaMalloc((void**)&d_source, sizeof(int));
cudaMalloc((void**)&d_mutex, sizeof(int));
cudaMalloc((void**)&d_nodeParent, sizeof(int) * nodeSize);
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMalloc((void**)&d_nodeW, sizeof(int) * nodeSize);
int* d_edgeIndex, *d_edges, *d_costs;
cudaMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
cudaMalloc((void**)&d_edges, sizeof(int) * edgeSize);
cudaMalloc((void**)&d_costs, sizeof(int) * edgeSize);
cudaMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
cudaMemcpy(d_costs, costs, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
cudaMemcpy(d_head1, &head1, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_head2, &head2, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_source, &source, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mutex, &mutex, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice);
int numOfThreads = BS;
int numOfBlocks = nodeSize / numOfThreads + (nodeSize%numOfThreads == 0 ? 0 : 1);
BF << <numOfBlocks, numOfThreads >> >(d_edgeIndex, d_edges, d_costs, d_nodeW, d_nodeParent, d_itNo, d_source, F1, F2, d_head1, d_head2, d_mutex, d_numOfThreads);
cudaMemcpy(costs, d_costs, edgeSize*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(outW, d_nodeW, (nodeSize - 1)*sizeof(int), cudaMemcpyDeviceToHost); // outW only holds the original nodes; skip the virtual source added above
//for (int i = 0; i < nodeSize; i++)
// cout << outW[i]<<" " ;
//cout << endl << endl;
//for (int i = 0; i < edgeSize ; i++)
// cout <<costs[i]<<" ";
//cout << endl << endl;
// release device buffers
cudaFree(F1); cudaFree(F2);
cudaFree(d_itNo);
cudaFree(d_head1); cudaFree(d_head2);
cudaFree(d_source); cudaFree(d_mutex);
cudaFree(d_nodeParent); cudaFree(d_numOfThreads);
cudaFree(d_nodeW);
cudaFree(d_edgeIndex); cudaFree(d_edges); cudaFree(d_costs);
}
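/* Johnson's algorithm, CPU reference path: instead of introducing a brand-new
* source vertex, this implementation reuses vertex 0 and gives it a zero-cost
* edge to every other vertex (addZeroEdge below), so the Bellman-Ford pass can
* assign a finite potential h(v) to every vertex even in a disconnected graph. */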
Graph addZeroEdge(Graph g) {
// add a zero-cost edge from vertex 0 to every other vertex
for (int i = 1; i < g.size(); i++) {
g[0].push_back({ i, 0 });
}
return g;
}
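/* Bellman-Ford over the augmented graph, kept as a single distance row that is
* relaxed in place: d[v] = min(d[v], d[u] + w(u,v)) over every edge, repeated
* |V|-1 times, after which the row holds the potentials h(v) used for reweighting. */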
SingleSP bellmanford(Graph &g, int s) {
vector<vector<int>> memo(1, vector<int>(g.size(), INF));
// initialise base case
memo[0][s] = 0;
// relax every edge |V|-1 times; memo keeps a single row that is updated in place
for (int i = 1; i < g.size(); i++) {
// compute shortest paths from s to all vertices, with max hop-count i
for (int n = 0; n < g.size(); n++) {
for (auto& e : g[n]) {
if (memo[0][n] != INF) {
if (memo[0][n] + e.cost < memo[0][e.head]) {
memo[0][e.head] = memo[0][n] + e.cost;
}
}
}
}
}
// check if the last iteration differed from the 2nd-last
/*for (int j = 0; j < g.size(); j++) {
if (memo[g.size() + 1][j] != memo[g.size()][j]) {
throw string{ "negative cycle found" };
}
}*/
return memo[0];
}
/**********************************************************************************************************************************/
/**********************************************************************************************************************************/
// CPU - GPU verification
int main1()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
string graphName = "graph1.txt" ;
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
//string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
//string graphName = "graph8.txt";
//string graphName = "graph9.txt";
//std::string graphName = "50k_1m.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
cudaMallocHost((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
cudaMallocHost((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
cudaMallocHost((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
f >> cost;
//cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
//GPUDijkstraQ(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
vector<int*> allWeights;
//for (int w = 0; w < nodeSize; w++)
//{
// int* amca = new int[nodeSize + 1];
// allWeights.push_back(amca);
//}
for (int w = 0; w < nodeSize; w++)
{
int* amca = new int[nodeSize + 1];
allWeights.push_back(amca);
}
int* d_edgeIndex, *d_edges, *d_costs;
cudaMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
cudaMalloc((void**)&d_edges, sizeof(int) * edgeSize);
cudaMalloc((void**)&d_costs, sizeof(int) * edgeSize);
const int numOfStreams = 1;
cudaStream_t streams[numOfStreams];
for (int i = 0; i < numOfStreams; i++)
{
cudaStreamCreate(&streams[i]);
}
cudaMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, nodeW, nodeParent);
//GPUDijkstra(edgeSize, nodeSize, source, head1, head2, mutex, edgeIndex, edges, costs, allWeights[0], nodeParent);
//oneGPUDijkstra(edgeSize, nodeSize, 0, head1, head2, mutex, edgeIndex, edges, costs, allWeights);
//cudaProfilerStart();
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
Johnson1(nodeW, edgeIndex, edges, costs, nodeSize, edgeSize);
cudaMemcpy(d_costs, costs, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
int* d_deltas = 0;
int* d_numOfThreads = 0;
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
cudaMemcpy(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int threadsPerBlock = BS;
int numOfBlocks = nodeSize / threadsPerBlock + (nodeSize % threadsPerBlock == 0 ? 0 : 1);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(d_edgeIndex, d_edges, d_costs, d_deltas, d_numOfThreads);
cudaDeviceSynchronize();
for (int n = 0; n < nodeSize; n++)
{
//cudaDeviceSynchronize();
//std::cout << n << std::endl;
//oneGPUDijkstra(edgeSize, nodeSize, n, head1, head2, mutex, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraQVerify(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
oneGPUDijkstraPrefixVerify(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//std::cout << n << std::endl;
}
cout << "GPU done" << endl;
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
StartCounter();
Graph gprime = addZeroEdge(g);
SingleSP ssp;
try {
ssp = bellmanford(gprime, 0);
}
catch (string e) {
cout << "Negative cycles found in graph. Cannot compute shortest paths." << endl;
throw e;
}
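// Johnson reweighting: every edge cost becomes w'(u,v) = w(u,v) + h(u) - h(v),
// which is non-negative once h comes from Bellman-Ford, so Dijkstra is valid from
// every source; shortest paths are preserved, only their lengths are shifted.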
for (int i = 1; i < g.size(); i++) {
for (auto &e : g[i]) {
e.cost = e.cost + ssp[i] - ssp[e.head];
}
}
AllSP allsp(g.size());
for (int i = 0; i < g.size(); i++) {
allsp[i] = djikstra(g, i);
}
cout << "CPU Time: " << GetCounter() << endl;
//cout << "GPU Matrix:" << endl;
//for (unsigned int i = 0; i < 1; i++){
// for (unsigned int j = 0; j < allWeights.size(); j++)
// cout << allWeights[i][j] << " ";
// cout << endl;
//}
/*
cout << "CPU Matrix:" << endl;
cout << endl << endl;
for (unsigned int i = 0; i < allsp.size(); i++){
for (unsigned int j = 0; j < allsp[i].size(); j++)
cout << allsp[i][j] << " ";
cout << endl;
}*/
int count = 0;
bool success = true;
for (unsigned int i = 0; i < allWeights.size(); i++){
for (unsigned int j = 0; j < allWeights.size(); j++){
//cout << allsp[i][j] << " " << allWeights[i][j] << endl;
if (allsp[i][j] != allWeights[i][j]){
success = false;
count++;
//cout << i << endl;
//cout << "***************************" << endl;
}
}
}
if (success)
std::cout << "successful" << std::endl;
else
std::cout << "fail" << std::endl;
if (count)
cout << count<< endl;
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
cudaFreeHost(edgeIndex);
cudaFreeHost(edges);
cudaFreeHost(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
//! gpu performance
int main()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
//string graphName = "graph1.txt";
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
//string graphName = "graph8.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
cudaMallocHost((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
cudaMallocHost((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
cudaMallocHost((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
f >> cost;
//cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
vector<int*> allWeights;
for (int w = 0; w < 1; w++)
{
int* amca = new int[nodeSize + 1];
allWeights.push_back(amca);
}
int* d_edgeIndex, *d_edges, *d_costs;
cudaMalloc((void**)&d_edgeIndex, sizeof(int) * (nodeSize + 1));
cudaMalloc((void**)&d_edges, sizeof(int) * edgeSize);
cudaMalloc((void**)&d_costs, sizeof(int) * edgeSize);
const int numOfStreams = 1;
cudaStream_t streams[numOfStreams];
for (int i = 0; i < numOfStreams; i++)
{
cudaStreamCreate(&streams[i]);
}
cudaMemcpy(d_edgeIndex, edgeIndex, sizeof(int) * (nodeSize + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
//cudaProfilerStart();
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
Johnson1(nodeW, edgeIndex, edges, costs, nodeSize, edgeSize);
cudaMemcpy(d_costs, costs, sizeof(int) * (edgeSize), cudaMemcpyHostToDevice);
int* d_deltas = 0;
int* d_numOfThreads = 0;
cudaMalloc((void**)&d_numOfThreads, sizeof(int));
cudaMalloc((void**)&d_deltas, sizeof(int) * nodeSize);
cudaMemcpy(d_numOfThreads, &nodeSize, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int threadsPerBlock = BS;
int numOfBlocks = nodeSize / threadsPerBlock + (nodeSize % threadsPerBlock == 0 ? 0 : 1);
computeDeltaUKernel << < numOfBlocks, threadsPerBlock >> >(d_edgeIndex, d_edges, d_costs, d_deltas, d_numOfThreads);
cudaDeviceSynchronize();
for (int n = 0; n <nodeSize; n++)
{
oneGPUDijkstra(edgeSize, nodeSize, n, head1, head2, mutex, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraQ(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
//oneGPUDijkstraPrefix(edgeSize, nodeSize, n, d_edgeIndex, d_edges, d_costs, d_deltas, allWeights, &streams[n%numOfStreams]);
}
cout << "done" << endl;
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %lf ms\n", elapsedTime);
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
cudaFreeHost(edgeIndex);
cudaFreeHost(edges);
cudaFreeHost(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
//! cpu performance
int main2()
{
srand(time(NULL));
int edgeSize = 12;
int nodeSize = 9;
int source = 0;
int head1 = 0;
int head2 = 0;
int mutex = 0;
std::ifstream f;
//string graphName = "graph1.txt";
//string graphName = "graph2.txt";
//string graphName = "graph3.txt";
//string graphName = "graph4.txt";
//string graphName = "graph5.txt";
//string graphName = "graph6.txt";
//string graphName = "graph7.txt";
string graphName = "graph8.txt";
//std::string graphName = "50k_1m.txt";
f.open(graphName);
std::cout << graphName << std::endl;
if (!f.is_open())
{
std::cout << "File not found!" << std::endl;
getchar();
return -1;
}
f >> nodeSize;
f >> edgeSize;
cout << edgeSize << " " << nodeSize << endl;
int* edgeIndex, *edges, *costs;
cudaMallocHost((void**)&edgeIndex, (nodeSize + 2)*sizeof(int));
cudaMallocHost((void**)&edges, (edgeSize + nodeSize)*sizeof(int));
cudaMallocHost((void**)&costs, (edgeSize + nodeSize)*sizeof(int));
int* nodeW = new int[nodeSize];
int* nodeParent = new int[nodeSize];
/*******************/
Graph g;
g.resize(nodeSize);
/******************/
std::vector<std::vector<int>> edgesVector;
edgesVector.resize(nodeSize + 1);
std::vector<std::vector<int>> costsVector;
costsVector.resize(nodeSize + 1);
for (int i = 0; i < edgeSize; i++){
int from, to;
int cost;
f >> from;
f >> to;
//from--;
//to--;
//f >> cost;
cost = rand() % 10 + 1;
edgesVector[from].push_back(to);
costsVector[from].push_back(cost);
/***********/
Edge e;
e.head = to;
e.cost = cost;
g[from].push_back(e);
/***********/
}
for (int i = 0; i < nodeSize; i++){
edgesVector[nodeSize].push_back(i);
costsVector[nodeSize].push_back(0);
}
int offset = 0;
for (int i = 0; i < nodeSize; i++){
edgeIndex[i] = offset;
//printf("%d", offset);
int end = offset + edgesVector[i].size();
for (int j = offset; j < end; j++){
edges[j] = edgesVector[i][j - offset];
costs[j] = costsVector[i][j - offset];
}
offset = end;
}
edgeIndex[nodeSize] = edgeSize;
for (int i = edgeSize; i < edgeSize + nodeSize; i++){
edges[i] = edgesVector[nodeSize][i - edgeSize];
costs[i] = costsVector[nodeSize][i - edgeSize];
}
edgeIndex[nodeSize + 1] = edgeSize + nodeSize;
f.close();
StartCounter();
Graph gprime = addZeroEdge(g);
SingleSP ssp;
try {
ssp = bellmanford(gprime, 0);
}
catch (string e) {
cout << "Negative cycles found in graph. Cannot compute shortest paths." << endl;
throw e;
}
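// Johnson reweighting as in main1: w'(u,v) = w(u,v) + h(u) - h(v), non-negative
// once h comes from Bellman-Ford.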
for (int i = 1; i < g.size(); i++) {
for (auto &e : g[i]) {
e.cost = e.cost + ssp[i] - ssp[e.head];
}
}
AllSP allsp(1);
for (int i = 0; i < g.size(); i++) {
allsp[0] = djikstra(g, i);
}
cout << "CPU Time: " << GetCounter() << endl;
getchar();
//delete[] edgeIndex;
//delete[] edges;
//delete[] costs;
cudaFreeHost(edgeIndex);
cudaFreeHost(edges);
cudaFreeHost(costs);
//delete[] nodeW;
//delete[] nodeParent;
//delete[] streams;
return 0;
}
__global__ void prescan(float *g_odata, float *g_idata, int *n)
{
int thid = threadIdx.x;
int offset = 1;
extern __shared__ float temp[]; // allocated on invocation
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
for (int d = *n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[*n - 1] = 0; } // clear the last element
for (int d1 = 1; d1 < *n; d1 *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d1)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
}
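/* Illustrative host-side sketch of how the prescan kernel above can be driven;
* this helper and its names (prescanHostSketch, h_in, h_out, d_in, d_out, d_n)
* are hypothetical and not part of the original file. One block of n/2 threads
* scans n elements with n*sizeof(float) bytes of dynamic shared memory, so n is
* assumed to be a power of two (see makeItPowerOf2 below) and at most 2048. */
void prescanHostSketch(float* h_out, float* h_in, int n)
{
float* d_in = 0;
float* d_out = 0;
int* d_n = 0;
cudaMalloc((void**)&d_in, n * sizeof(float));
cudaMalloc((void**)&d_out, n * sizeof(float));
cudaMalloc((void**)&d_n, sizeof(int));
cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
// each of the n/2 threads loads and writes two elements of the scan
prescan << <1, n / 2, n * sizeof(float) >> >(d_out, d_in, d_n);
cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_n);
}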
int makeItPowerOf2(int size){
int powerOfTwo = 1;
while (size > powerOfTwo){
powerOfTwo *= 2;
}
return powerOfTwo;
}
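// Example: makeItPowerOf2(1000) == 1024 and makeItPowerOf2(1024) == 1024;
// presumably used to pad the scan length to the full power-of-two size the
// tree-based prescan above expects.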
//! prefix sum test
int main4(int argc, char *argv[])
{
// Initialization. The shuffle intrinsic is not available on SM < 3.0
// so waive the test if the hardware is not present.
// int cuda_device = 0;
printf("Starting shfl_scan\n");
//// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
//cudaDeviceProp deviceProp;
//checkCudaErrors(cudaGetDevice(&cuda_device));
//checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
//printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
// deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
//// __shfl intrinsic needs SM 3.0 or higher
//if (deviceProp.major < 3)
//{
// printf("> __shfl() intrinsic requires device SM 3.0+\n");
// printf("> Waiving test.\n");
// exit(EXIT_WAIVED);
//}
bool bTestResult = true;
bool simpleTest = shuffle_simple_test(argc, argv);
// bool intTest = shuffle_integral_image_test();
// bTestResult = simpleTest & intTest;
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
getchar();
cudaDeviceReset();
exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
7272e3e07cde0c33ca348505133fcf4495083073.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* L-31 MCS 572 Wed 2 Nov 2016 : matmatmul2.cu
* This file contains a very basic CUDA implementation
* of the multiplication of two 0/1 matrices.
* At the command line the user must give three dimensions: n, m, and p.
* The program generates a random n-by-m 0/1 matrix A,
* a random m-by-p 0/1 matrix B, and then computes the product.
* This version uses the x and y component of threadIdx. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__host__ void randomMatrix ( int n, int m, float *x )
/*
* Fills up the n-by-m matrix x with random
* values of zeroes and ones. */
{
int i,j,r;
float *p = x;
for(i=0; i<n; i++)
for(j=0; j<m; j++)
{
r = rand() % 2;
*(p++) = (float) r;
}
}
__host__ void writeMatrix ( int n, int m, float *x )
/*
* Writes the n-by-m matrix x to screen. */
{
int i,j;
float *p = x;
for(i=0; i<n; i++,printf("\n"))
for(j=0; j<m; j++)
printf(" %d", (int)*(p++));
}
__global__ void matrixMultiply
( int n, int m, int p, float *A, float *B, float *C )
/*
* Multiplies the n-by-m matrix A
* with the m-by-p matrix B into the matrix C.
* The (i,j)-th thread computes the (i,j)-th element of C. */
{
int i = threadIdx.x;
int j = threadIdx.y;
int ell = i*p + j;
C[ell] = 0.0;
float *pB;
for(int k=0; k<m; k++)
{
pB = &B[j+k*p];
C[ell] += A[i*m+k]*(*pB);
}
}
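/* Note: main below launches this kernel as a single n-by-p block
* (dim3 dimGrid(1,1), dim3 dimBlock(n,p)), so the example only works while
* n*p stays within the per-block thread limit (1024 on current hardware). */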
int main ( int argc, char*argv[] )
{
if(argc < 4)
{
printf("call with 3 arguments :\n");
printf("dimensions n, m, and p\n");
}
else
{
int n = atoi(argv[1]); /* number of rows of A */
int m = atoi(argv[2]); /* number of columns of A */
/* and number of rows of B */
int p = atoi(argv[3]); /* number of columns of B */
srand(time(0));
printf("a random %d-by-%d 0/1 matrix A :\n",n,m);
float *Ahost = (float*)calloc(n*m,sizeof(float));
randomMatrix(n,m,Ahost); writeMatrix(n,m,Ahost);
printf("a random %d-by-%d 0/1 matrix B :\n",m,p);
float *Bhost = (float*)calloc(m*p,sizeof(float));
randomMatrix(m,p,Bhost); writeMatrix(m,p,Bhost);
float *Chost = (float*)calloc(n*p,sizeof(float));
/* allocate memory on the device for A, B, and C */
float *Adevice;
size_t sA = n*m*sizeof(float);
hipMalloc((void**)&Adevice,sA);
float *Bdevice;
size_t sB = m*p*sizeof(float);
hipMalloc((void**)&Bdevice,sB);
float *Cdevice;
size_t sC = n*p*sizeof(float);
hipMalloc((void**)&Cdevice,sC);
/* copy matrices A and B from host to the device */
hipMemcpy(Adevice,Ahost,sA,hipMemcpyHostToDevice);
hipMemcpy(Bdevice,Bhost,sB,hipMemcpyHostToDevice);
/* kernel invocation launching n*p threads */
dim3 dimGrid(1,1);
dim3 dimBlock(n,p);
hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid),dim3(dimBlock), 0, 0, n,m,p,Adevice,Bdevice,Cdevice);
/* copy matrix C from device to the host */
hipMemcpy(Chost,Cdevice,sC,hipMemcpyDeviceToHost);
/* freeing memory on the device */
hipFree(Adevice); hipFree(Bdevice); hipFree(Cdevice);
printf("the resulting %d-by-%d matrix C :\n",n,p);
writeMatrix(n,p,Chost);
}
return 0;
}
|
7272e3e07cde0c33ca348505133fcf4495083073.cu
|
/* L-31 MCS 572 Wed 2 Nov 2016 : matmatmul2.cu
* This file contains a very basic CUDA implementation
* of the multiplication of two 0/1 matrices.
* At the command line the user must give three dimensions: n, m, and p.
* The program generates a random n-by-m 0/1 matrix A,
* a random m-by-p 0/1 matrix B, and then computes the product.
* This version uses the x and y component of threadIdx. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__host__ void randomMatrix ( int n, int m, float *x )
/*
* Fills up the n-by-m matrix x with random
* values of zeroes and ones. */
{
int i,j,r;
float *p = x;
for(i=0; i<n; i++)
for(j=0; j<m; j++)
{
r = rand() % 2;
*(p++) = (float) r;
}
}
__host__ void writeMatrix ( int n, int m, float *x )
/*
* Writes the n-by-m matrix x to screen. */
{
int i,j;
float *p = x;
for(i=0; i<n; i++,printf("\n"))
for(j=0; j<m; j++)
printf(" %d", (int)*(p++));
}
__global__ void matrixMultiply
( int n, int m, int p, float *A, float *B, float *C )
/*
* Multiplies the n-by-m matrix A
* with the m-by-p matrix B into the matrix C.
* The (i,j)-th thread computes the (i,j)-th element of C. */
{
int i = threadIdx.x;
int j = threadIdx.y;
int ell = i*p + j;
C[ell] = 0.0;
float *pB;
for(int k=0; k<m; k++)
{
pB = &B[j+k*p];
C[ell] += A[i*m+k]*(*pB);
}
}
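/* Note: main below launches this kernel as a single n-by-p block
* (dim3 dimGrid(1,1), dim3 dimBlock(n,p)), so the example only works while
* n*p stays within the per-block thread limit (1024 on current hardware). */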
int main ( int argc, char*argv[] )
{
if(argc < 4)
{
printf("call with 3 arguments :\n");
printf("dimensions n, m, and p\n");
}
else
{
int n = atoi(argv[1]); /* number of rows of A */
int m = atoi(argv[2]); /* number of columns of A */
/* and number of rows of B */
int p = atoi(argv[3]); /* number of columns of B */
srand(time(0));
printf("a random %d-by-%d 0/1 matrix A :\n",n,m);
float *Ahost = (float*)calloc(n*m,sizeof(float));
randomMatrix(n,m,Ahost); writeMatrix(n,m,Ahost);
printf("a random %d-by-%d 0/1 matrix B :\n",m,p);
float *Bhost = (float*)calloc(m*p,sizeof(float));
randomMatrix(m,p,Bhost); writeMatrix(m,p,Bhost);
float *Chost = (float*)calloc(n*p,sizeof(float));
/* allocate memory on the device for A, B, and C */
float *Adevice;
size_t sA = n*m*sizeof(float);
cudaMalloc((void**)&Adevice,sA);
float *Bdevice;
size_t sB = m*p*sizeof(float);
cudaMalloc((void**)&Bdevice,sB);
float *Cdevice;
size_t sC = n*p*sizeof(float);
cudaMalloc((void**)&Cdevice,sC);
/* copy matrices A and B from host to the device */
cudaMemcpy(Adevice,Ahost,sA,cudaMemcpyHostToDevice);
cudaMemcpy(Bdevice,Bhost,sB,cudaMemcpyHostToDevice);
/* kernel invocation launching n*p threads */
dim3 dimGrid(1,1);
dim3 dimBlock(n,p);
matrixMultiply<<<dimGrid,dimBlock>>>(n,m,p,Adevice,Bdevice,Cdevice);
/* copy matrix C from device to the host */
cudaMemcpy(Chost,Cdevice,sC,cudaMemcpyDeviceToHost);
/* freeing memory on the device */
cudaFree(Adevice); cudaFree(Bdevice); cudaFree(Cdevice);
printf("the resulting %d-by-%d matrix C :\n",n,p);
writeMatrix(n,p,Chost);
}
return 0;
}
|
e45ea6b7c11f29a4bff245fe1a3d17ad0ee4ceea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/*
* Complex numbers
*/
typedef struct {
double real;
double imag;
} Complex;
__device__
Complex multiply(Complex a, Complex b) {
Complex result;
result.real = a.real*b.real - a.imag*b.imag;
result.imag = a.real*b.imag + a.imag*b.real;
return result;
};
__device__
Complex add(Complex a, Complex b) {
Complex result;
result.real = a.real + b.real;
result.imag = a.imag + b.imag;
return result;
}
__device__
double length(Complex z) {
return sqrt(z.real * z.real + z.imag * z.imag);
}
/*
* Mandelbrot
*/
__device__
bool is_out(Complex z) {
return length(z) > 2;
}
__device__
Complex mandelbrot_step(Complex z, Complex c) {
Complex z_new;
z_new = add(multiply(z, z), c);
return z_new;
};
__device__
int mandelbrot_point(Complex z, int maxiter) {
Complex c = z;
for (int i = 0; i < maxiter; i++) {
if (is_out(z)) {
return i;
}
z = mandelbrot_step(z, c);
};
return 0;
};
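/* Escape-time test: returns the iteration at which |z| first exceeds 2, or 0 if
* z stays bounded for all maxiter iterations (the point is treated as inside the set). */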
__global__
void mandelbrot_kernel(Complex* points, int* results, int points_count, int maxiter) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < points_count) {
results[i] = mandelbrot_point(points[i], maxiter);
}
};
Complex* get_points(Complex min, Complex max, int width_px, int height_px) {
int points_count = width_px * height_px;
Complex* results = (Complex*) malloc(sizeof(Complex) * points_count);
double real_step = (max.real - min.real) / width_px;
double imag_step = (max.imag - min.imag) / height_px;
int i = 0;
for (int x = 0; x < width_px; x++) {
for (int y = 0; y < height_px; y++) {
results[i].real = min.real + real_step*x;
results[i].imag = min.imag + imag_step*y;
i++;
}
}
return results;
};
int* calc_mandelbrot_set(Complex* points, int points_count, int maxiter) {
// int* result = (int*) malloc(sizeof(int) * points_count);
// for (int i = 0; i < points_count; i++) {
// result[i] = mandelbrot_point(points[i], maxiter);
// }
// return result;
int *host_results, *device_results;
Complex *device_points;
// Transfer input to device
hipMalloc(&device_points, points_count * sizeof(Complex));
hipMemcpy(device_points, points, points_count * sizeof(Complex), hipMemcpyHostToDevice);
// Allocate results
host_results = (int*) malloc(points_count * sizeof(int));
hipMalloc(&device_results, points_count * sizeof(int));
// Call kernel
int blocks = (points_count + 255)/256;
int threads = 256;
hipLaunchKernelGGL(( mandelbrot_kernel), dim3(blocks), dim3(threads), 0, 0, device_points, device_results, points_count, maxiter);
// Transfer results to host
hipMemcpy(host_results, device_results, points_count * sizeof(int), hipMemcpyDeviceToHost);
// Cleanup device buffers before returning the host-side results
hipFree(device_points);
hipFree(device_results);
return host_results;
};
void write_set(FILE* file, Complex* points, int* results, int count) {
fputs("real,imag,iter\n", file);
for (int i = 0; i < count; i++) {
Complex point = points[i];
fprintf(file, "%f,%f,%d\n", point.real, point.imag, results[i]);
};
};
int main(int argc, char *argv[] ) {
// Simple
const Complex MIN = {.real = -2.0, .imag = -1.25};
const Complex MAX = {.real = 0.5, .imag = 1.25};
// Cool example, needs 1000+ iterations
// const Complex MIN = {.real = -0.74877, .imag = 0.06505};
// const Complex MAX = {.real = -0.74872, .imag = 0.06510};
FILE* result_file;
if (argc < 4) {
printf("Usage:\n");
printf(" ./gpu <width px.> <height px.> <max iterations> [<result file>]\n");
return -1;
} else {
char* result_path;
int width_px = strtol(argv[1], NULL, 10);
int height_px = strtol(argv[2], NULL, 10);
int maxiter = strtol(argv[3], NULL, 10);
if (argc == 5) {
result_path = argv[4];
} else {
result_path = NULL;
};
printf("Running mandelbrot set on:");
printf("x = [%f - %f], ", MIN.real, MAX.real);
printf("y = [%f - %f]\n", MIN.imag, MAX.imag);
printf("Iterations: %d\n", maxiter);
int points_count = width_px * height_px;
Complex* points = get_points(MIN, MAX, width_px, height_px);
printf("Started...\n");
clock_t begin = clock();
int* results = calc_mandelbrot_set(points, points_count, maxiter);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Spent: %f seconds\n", time_spent);
if (result_path != NULL) {
result_file = fopen(result_path,"w");
if (result_file != NULL) {
printf("Writing to: \"%s\"\n", result_path);
write_set(result_file, points, results, points_count);
fclose (result_file);
printf("Done\n");
} else {
printf("Can not open result file");
return -1;
};
};
free(points);
free(results);
return 0;
}
}
|
e45ea6b7c11f29a4bff245fe1a3d17ad0ee4ceea.cu
|
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/*
* Complex numbers
*/
typedef struct {
double real;
double imag;
} Complex;
__device__
Complex multiply(Complex a, Complex b) {
Complex result;
result.real = a.real*b.real - a.imag*b.imag;
result.imag = a.real*b.imag + a.imag*b.real;
return result;
};
__device__
Complex add(Complex a, Complex b) {
Complex result;
result.real = a.real + b.real;
result.imag = a.imag + b.imag;
return result;
}
__device__
double length(Complex z) {
return sqrt(z.real * z.real + z.imag * z.imag);
}
/*
* Mandelbrot
*/
__device__
bool is_out(Complex z) {
return length(z) > 2;
}
__device__
Complex mandelbrot_step(Complex z, Complex c) {
Complex z_new;
z_new = add(multiply(z, z), c);
return z_new;
};
__device__
int mandelbrot_point(Complex z, int maxiter) {
Complex c = z;
for (int i = 0; i < maxiter; i++) {
if (is_out(z)) {
return i;
}
z = mandelbrot_step(z, c);
};
return 0;
};
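/* Escape-time test: returns the iteration at which |z| first exceeds 2, or 0 if
* z stays bounded for all maxiter iterations (the point is treated as inside the set). */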
__global__
void mandelbrot_kernel(Complex* points, int* results, int points_count, int maxiter) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < points_count) {
results[i] = mandelbrot_point(points[i], maxiter);
}
};
Complex* get_points(Complex min, Complex max, int width_px, int height_px) {
int points_count = width_px * height_px;
Complex* results = (Complex*) malloc(sizeof(Complex) * points_count);
double real_step = (max.real - min.real) / width_px;
double imag_step = (max.imag - min.imag) / height_px;
int i = 0;
for (int x = 0; x < width_px; x++) {
for (int y = 0; y < height_px; y++) {
results[i].real = min.real + real_step*x;
results[i].imag = min.imag + imag_step*y;
i++;
}
}
return results;
};
int* calc_mandelbrot_set(Complex* points, int points_count, int maxiter) {
// int* result = (int*) malloc(sizeof(int) * points_count);
// for (int i = 0; i < points_count; i++) {
// result[i] = mandelbrot_point(points[i], maxiter);
// }
// return result;
int *host_results, *device_results;
Complex *device_points;
// Transfer input to device
cudaMalloc(&device_points, points_count * sizeof(Complex));
cudaMemcpy(device_points, points, points_count * sizeof(Complex), cudaMemcpyHostToDevice);
// Allocate results
host_results = (int*) malloc(points_count * sizeof(int));
cudaMalloc(&device_results, points_count * sizeof(int));
// Call kernel
int blocks = (points_count + 255)/256;
int threads = 256;
mandelbrot_kernel<<<blocks, threads>>>(device_points, device_results, points_count, maxiter);
// Transfer results to host
cudaMemcpy(host_results, device_results, points_count * sizeof(int), cudaMemcpyDeviceToHost);
// Cleanup device buffers before returning the host-side results
cudaFree(device_points);
cudaFree(device_results);
return host_results;
};
void write_set(FILE* file, Complex* points, int* results, int count) {
fputs("real,imag,iter\n", file);
for (int i = 0; i < count; i++) {
Complex point = points[i];
fprintf(file, "%f,%f,%d\n", point.real, point.imag, results[i]);
};
};
int main(int argc, char *argv[] ) {
// Simple
const Complex MIN = {.real = -2.0, .imag = -1.25};
const Complex MAX = {.real = 0.5, .imag = 1.25};
// Cool example, needs 1000+ iterations
// const Complex MIN = {.real = -0.74877, .imag = 0.06505};
// const Complex MAX = {.real = -0.74872, .imag = 0.06510};
FILE* result_file;
if (argc < 4) {
printf("Usage:\n");
printf(" ./gpu <width px.> <height px.> <max iterations> [<result file>]\n");
return -1;
} else {
char* result_path;
int width_px = strtol(argv[1], NULL, 10);
int height_px = strtol(argv[2], NULL, 10);
int maxiter = strtol(argv[3], NULL, 10);
if (argc == 5) {
result_path = argv[4];
} else {
result_path = NULL;
};
printf("Running mandelbrot set on:");
printf("x = [%f - %f], ", MIN.real, MAX.real);
printf("y = [%f - %f]\n", MIN.imag, MAX.imag);
printf("Iterations: %d\n", maxiter);
int points_count = width_px * height_px;
Complex* points = get_points(MIN, MAX, width_px, height_px);
printf("Started...\n");
clock_t begin = clock();
int* results = calc_mandelbrot_set(points, points_count, maxiter);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Spent: %f seconds\n", time_spent);
if (result_path != NULL) {
result_file = fopen(result_path,"w");
if (result_file != NULL) {
printf("Writing to: \"%s\"\n", result_path);
write_set(result_file, points, results, points_count);
fclose (result_file);
printf("Done\n");
} else {
printf("Can not open result file");
return -1;
};
};
free(points);
free(results);
return 0;
}
}
|
c4c871ca979d3caefccaebf4c3ad072ac1a83227.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s %6s", size, count, typeName, opName);
}
void ReduceScatterGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = (count/nranks)*nranks;
*recvcount = count/nranks;
*sendInplaceOffset = 0;
*recvInplaceOffset = count/nranks;
*paramcount = *recvcount;
}
testResult_t ReduceScatterInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
CUDACHECK(hipMemcpy(args->expected[i], args->recvbuffs[i], args->expectedBytes, hipMemcpyDefault));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, rank*recvcount, type, op, rep, nranks));
CUDACHECK(hipDeviceSynchronize());
}
return testSuccess;
}
void ReduceScatterGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize * (nranks - 1)) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
*busBw = baseBw * factor;
}
testResult_t ReduceScatterRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
NCCLCHECK(ncclReduceScatter(sendbuff, recvbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl reduceScatterTest = {
"ReduceScatter",
ReduceScatterGetCollByteCount,
ReduceScatterInitData,
ReduceScatterGetBw,
ReduceScatterRunColl
};
void ReduceScatterGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
ReduceScatterGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t ReduceScatterRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &reduceScatterTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
run_ops = &op;
run_opnames = &opName;
op_count = 1;
} else {
op_count = sizeof(test_ops)/sizeof(test_ops[0]);
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine reduceScatterEngine = {
ReduceScatterGetBuffSize,
ReduceScatterRunTest
};
#pragma weak ncclTestEngine=reduceScatterEngine
|
c4c871ca979d3caefccaebf4c3ad072ac1a83227.cu
|
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s %6s", size, count, typeName, opName);
}
void ReduceScatterGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = (count/nranks)*nranks;
*recvcount = count/nranks;
*sendInplaceOffset = 0;
*recvInplaceOffset = count/nranks;
*paramcount = *recvcount;
}
testResult_t ReduceScatterInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(cudaSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
CUDACHECK(cudaMemcpy(args->expected[i], args->recvbuffs[i], args->expectedBytes, cudaMemcpyDefault));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, rank*recvcount, type, op, rep, nranks));
CUDACHECK(cudaDeviceSynchronize());
}
return testSuccess;
}
void ReduceScatterGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize * (nranks - 1)) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
*busBw = baseBw * factor;
}
testResult_t ReduceScatterRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
NCCLCHECK(ncclReduceScatter(sendbuff, recvbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl reduceScatterTest = {
"ReduceScatter",
ReduceScatterGetCollByteCount,
ReduceScatterInitData,
ReduceScatterGetBw,
ReduceScatterRunColl
};
void ReduceScatterGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
ReduceScatterGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t ReduceScatterRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &reduceScatterTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
run_ops = &op;
run_opnames = &opName;
op_count = 1;
} else {
op_count = sizeof(test_ops)/sizeof(test_ops[0]);
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine reduceScatterEngine = {
ReduceScatterGetBuffSize,
ReduceScatterRunTest
};
#pragma weak ncclTestEngine=reduceScatterEngine
|
30c9646ea7313291aa0b3ccf62d25475c40cd76d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/components/absolute_array.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
namespace components {
constexpr int default_block_size = 512;
#include "common/cuda_hip/components/absolute_array.hpp.inc"
template <typename ValueType>
void inplace_absolute_array(std::shared_ptr<const DefaultExecutor> exec,
ValueType* data, size_type n)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(n, block_size.x), 1, 1);
hipLaunchKernelGGL(( kernel::inplace_absolute_array_kernel), dim3(grid_size), dim3(block_size), 0, 0,
n, as_cuda_type(data));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_INPLACE_ABSOLUTE_ARRAY_KERNEL);
template <typename ValueType>
void outplace_absolute_array(std::shared_ptr<const DefaultExecutor> exec,
const ValueType* in, size_type n,
remove_complex<ValueType>* out)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(n, block_size.x), 1, 1);
hipLaunchKernelGGL(( kernel::outplace_absolute_array_kernel), dim3(grid_size), dim3(block_size), 0, 0,
n, as_cuda_type(in), as_cuda_type(out));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_OUTPLACE_ABSOLUTE_ARRAY_KERNEL);
} // namespace components
} // namespace cuda
} // namespace kernels
} // namespace gko
|
30c9646ea7313291aa0b3ccf62d25475c40cd76d.cu
|
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/components/absolute_array.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
namespace components {
constexpr int default_block_size = 512;
#include "common/cuda_hip/components/absolute_array.hpp.inc"
template <typename ValueType>
void inplace_absolute_array(std::shared_ptr<const DefaultExecutor> exec,
ValueType* data, size_type n)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(n, block_size.x), 1, 1);
kernel::inplace_absolute_array_kernel<<<grid_size, block_size, 0, 0>>>(
n, as_cuda_type(data));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_INPLACE_ABSOLUTE_ARRAY_KERNEL);
template <typename ValueType>
void outplace_absolute_array(std::shared_ptr<const DefaultExecutor> exec,
const ValueType* in, size_type n,
remove_complex<ValueType>* out)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(ceildiv(n, block_size.x), 1, 1);
kernel::outplace_absolute_array_kernel<<<grid_size, block_size, 0, 0>>>(
n, as_cuda_type(in), as_cuda_type(out));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_OUTPLACE_ABSOLUTE_ARRAY_KERNEL);
} // namespace components
} // namespace cuda
} // namespace kernels
} // namespace gko
|
ff3168a5acd4f33136481963d741ccf6ee76701d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#define N 10000000
__global__ void vector_add(float *out, float *a, float *b, int n) {
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
for(int i = 0; i < N; i++){ a[i] = 1.0f; b[i] = 2.0f; }
// Allocate device memory for a, b and out
hipMalloc((void**)&d_a, sizeof(float) * N);
hipMalloc((void**)&d_b, sizeof(float) * N);
hipMalloc((void**)&d_out, sizeof(float) * N);
// Transfer input data from host to device memory
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
// Launch the kernel on the device buffers (a single thread loops over all N elements)
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, d_out, d_a, d_b, N);
// Copy the result back and clean up after kernel execution
hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
free(a);
free(b);
free(out);
return 0;
}
|
ff3168a5acd4f33136481963d741ccf6ee76701d.cu
|
#include <stdlib.h>
#define N 10000000
__global__ void vector_add(float *out, float *a, float *b, int n) {
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
for(int i = 0; i < N; i++){ a[i] = 1.0f; b[i] = 2.0f; }
// Allocate device memory for a, b and out
cudaMalloc((void**)&d_a, sizeof(float) * N);
cudaMalloc((void**)&d_b, sizeof(float) * N);
cudaMalloc((void**)&d_out, sizeof(float) * N);
// Transfer input data from host to device memory
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// Launch the kernel on the device buffers (a single thread loops over all N elements)
vector_add<<<1,1>>>(d_out, d_a, d_b, N);
// Copy the result back and clean up after kernel execution
cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
free(a);
free(b);
free(out);
return 0;
}
|
9925b8aef812ee162b69d1fe64599204f2a93d0b.hip
|
// !!! This is a file automatically generated by hipify!!!
/* This is the source file contains the methods for the DG_Basis structs
*
* Author: Guodong Chen
* Email: [email protected]
* Last modified: 12/05/2019
*/
#include "stdlib.h"
#include "CUDA_Helper.cuh"
#include "DG_Quad.cuh"
#include "DG_Basis.cuh"
/* initialize the Basis struct */
hipError_t initBasisData(DG_BasisData *BasisData)
{
BasisData->order = 0;
BasisData->np = 0;
BasisData->nq1 = 0;
BasisData->sq = NULL;
BasisData->wq1 = NULL;
BasisData->EdgePhiL = NULL;
BasisData->EdgePhiR = NULL;
BasisData->nq2 = 0;
BasisData->xyq = NULL;
BasisData->wq2 = NULL;
BasisData->Phi = NULL;
BasisData->GPhix = NULL;
BasisData->GPhiy = NULL;
return hipSuccess;
}
/* allocates the initialize the DG_Basis */
hipError_t createBasisData(DG_BasisData **pBasisData)
{
CUDA_CALL(hipMallocManaged(pBasisData, sizeof(DG_BasisData)));
CUDA_CALL(initBasisData(*pBasisData));
return hipSuccess;
}
/* free the memory of the Basis Data */
hipError_t freeBasisData(DG_BasisData *BasisData)
{
CUDA_CALL(hipFree(BasisData->sq));
CUDA_CALL(hipFree(BasisData->wq1));
CUDA_CALL(hipFree(BasisData->xyq));
CUDA_CALL(hipFree(BasisData->wq2));
CUDA_CALL(hipFree(BasisData->Phi));
CUDA_CALL(hipFree(BasisData->GPhix));
CUDA_CALL(hipFree(BasisData->GPhiy));
CUDA_CALL(hipFree(BasisData->EdgePhiL[0]));
CUDA_CALL(hipFree(BasisData->EdgePhiR[0]));
CUDA_CALL(hipFree(BasisData->EdgePhiL));
CUDA_CALL(hipFree(BasisData->EdgePhiR));
CUDA_CALL(hipFree(BasisData));
return hipSuccess;
}
/* comupte the Basis data, allocate and fill in the members of BasisData */
hipError_t computeBasisData(int p, DG_BasisData *BasisData)
{
BasisData->order = p; // Basis order
BasisData->np = (p+1)*(p+2)/2; // number of degrees of freedom, np
// Get Edge Quad Points, integrates up to order 2p+1
CUDA_CALL(DG_QuadLine(2*p+1, &(BasisData->nq1), &(BasisData->sq), &(BasisData->wq1)));
// Get 2d Quad Points, integrates up to order 2p+1
CUDA_CALL(DG_QuadTriangle(2*p+1, &(BasisData->nq2), &(BasisData->xyq), &(BasisData->wq2)));
int i, j, edge, np, nq1, nq2;
// get the gradients of the basis function
np = BasisData->np;
nq1 = BasisData->nq1;
nq2 = BasisData->nq2;
double xy[2];
double GPhi[2*np];
CUDA_CALL(hipMallocManaged(&(BasisData->Phi), nq2*np*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(BasisData->GPhix), nq2*np*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(BasisData->GPhiy), nq2*np*sizeof(double)));
// evaluate the Phi and Gphi at the quad points
for (i=0; i<nq2; i++){
xy[0] = BasisData->xyq[2*i];
xy[1] = BasisData->xyq[2*i+1];
DG_TriLagrange(p, xy, BasisData->Phi+i*np);
DG_Grad_TriLagrange(p, xy, GPhi);
for (j=0; j<np; j++){
BasisData->GPhix[i*np+j] = GPhi[j];
BasisData->GPhiy[i*np+j] = GPhi[np+j];
}
}
// the edge basis for left and right element
double *tempL, *tempR;
CUDA_CALL(hipMallocManaged(&(BasisData->EdgePhiL), 3*sizeof(double *)));
CUDA_CALL(hipMallocManaged(&tempL, 3*nq1*np*sizeof(double)));
CUDA_CALL(hipMallocManaged(&(BasisData->EdgePhiR), 3*sizeof(double *)));
CUDA_CALL(hipMallocManaged(&tempR, 3*nq1*np*sizeof(double)));
// evaluate the basis at the edge nodes
for (edge=0; edge<3; edge++){
BasisData->EdgePhiL[edge] = tempL + edge*nq1*np;
BasisData->EdgePhiR[edge] = tempR + edge*nq1*np;
for (i=0; i<nq1; i++){
RefEdge2Elem(edge, xy, BasisData->sq[i]);
DG_TriLagrange(p, xy, BasisData->EdgePhiL[edge]+i*np);
RefEdge2Elem(edge, xy, 1.0-BasisData->sq[i]);
DG_TriLagrange(p, xy, BasisData->EdgePhiR[edge]+i*np);
}
}
return hipSuccess;
}
/* map 1d edge nodes to elem 2d coords */
hipError_t RefEdge2Elem(const int edge, double *xy, const double sq)
{
switch (edge){
case 0:
xy[0] = 1.0-sq;
xy[1] = sq;
break;
case 1:
xy[0] = 0.0;
xy[1] = 1.0-sq;
break;
case 2:
xy[0] = sq;
xy[1] = 0.0;
break;
default:
return hipErrorNotSupported;
break;
}
return hipSuccess;
}
/* evaluate basis function at xy */
hipError_t DG_TriLagrange(int p, const double *xy, double *phi)
{
double x, y;
x = xy[0];
y = xy[1];
switch (p) {
case 0:
phi[0] = 1.0;
return hipSuccess;
break;
case 1:
phi[0] = 1-x-y;
phi[1] = x ;
phi[2] = y;
return hipSuccess;
break;
case 2:
phi[0] = 1.0-3.0*x-3.0*y+2.0*x*x+4.0*x*y+2.0*y*y;
phi[2] = -x+2.0*x*x;
phi[5] = -y+2.0*y*y;
phi[4] = 4.0*x*y;
phi[3] = 4.0*y-4.0*x*y-4.0*y*y;
phi[1] = 4.0*x-4.0*x*x-4.0*x*y;
return hipSuccess;
break;
case 3:
phi[0] = 1.0-11.0/2.0*x-11.0/2.0*y+9.0*x*x+18.0*x*y+9.0*y*y-9.0/2.0*x*x*x-27.0/2.0*x*x*y-27.0/2.0*x*y*y-9.0/2.0*y*y*y;
phi[3] = x-9.0/2.0*x*x+9.0/2.0*x*x*x;
phi[9] = y-9.0/2.0*y*y+9.0/2.0*y*y*y;
phi[6] = -9.0/2.0*x*y+27.0/2.0*x*x*y;
phi[8] = -9.0/2.0*x*y+27.0/2.0*x*y*y;
phi[7] = -9.0/2.0*y+9.0/2.0*x*y+18.0*y*y-27.0/2.0*x*y*y-27.0/2.0*y*y*y;
phi[4] = 9.0*y-45.0/2.0*x*y-45.0/2.0*y*y+27.0/2.0*x*x*y+27.0*x*y*y+27.0/2.0*y*y*y;
phi[1] = 9.0*x-45.0/2.0*x*x-45.0/2.0*x*y+27.0/2.0*x*x*x+27.0*x*x*y+27.0/2.0*x*y*y;
phi[2] = -9.0/2.0*x+18.0*x*x+9.0/2.0*x*y-27.0/2.0*x*x*x-27.0/2.0*x*x*y;
phi[5] = 27.0*x*y-27.0*x*x*y-27.0*x*y*y;
return hipSuccess;
break;
default:
return hipErrorNotSupported;
break;
}
return hipSuccess;
}
// gradients of basis functions at reference elements
hipError_t DG_Grad_TriLagrange(int p, const double *xy, double *gphi)
{
double x, y;
int n = (p+1)*(p+2)/2;
x = xy[0];
y = xy[1];
switch (p){
case 0:
gphi[0] = 0.0;
gphi[n+0] = 0.0;
break;
case 1:
gphi[0] = -1.0;
gphi[1] = 1.0;
gphi[2] = 0.0;
gphi[n+0] = -1.0;
gphi[n+1] = 0.0;
gphi[n+2] = 1.0;
break;
case 2:
gphi[0] = -3.0+4.0*x+4.0*y;
gphi[2] = -1.0+4.0*x;
gphi[5] = 0.0;
gphi[4] = 4.0*y;
gphi[3] = -4.0*y;
gphi[1] = 4.0-8.0*x-4.0*y;
gphi[n+0] = -3.0+4.0*x+4.0*y;
gphi[n+2] = 0.0;
gphi[n+5] = -1.0+4.0*y;
gphi[n+4] = 4.0*x;
gphi[n+3] = 4.0-4.0*x-8.0*y;
gphi[n+1] = -4.0*x;
break;
case 3:
gphi[0] = -11.0/2.0+18.0*x+18.0*y-27.0/2.0*x*x-27.0*x*y-27.0/2.0*y*y;
gphi[3] = 1.0-9.0*x+27.0/2.0*x*x;
gphi[9] = 0.0;
gphi[6] = -9.0/2.0*y+27.0*x*y;
gphi[8] = -9.0/2.0*y+27.0/2.0*y*y;
gphi[7] = 9.0/2.0*y-27.0/2.0*y*y;
gphi[4] = -45.0/2.0*y+27.0*x*y+27.0*y*y;
gphi[1] = 9.0-45.0*x-45.0/2.0*y+81.0/2.0*x*x+54.0*x*y+27.0/2.0*y*y;
gphi[2] = -9.0/2.0+36.0*x+9.0/2.0*y-81.0/2.0*x*x-27.0*x*y;
gphi[5] = 27.0*y-54.0*x*y-27.0*y*y;
gphi[n+0] = -11.0/2.0+18.0*x+18.0*y-27.0/2.0*x*x-27.0*x*y-27.0/2.0*y*y;
gphi[n+3] = 0.0;
gphi[n+9] = 1.0-9.0*y+27.0/2.0*y*y;
gphi[n+6] = -9.0/2.0*x+27.0/2.0*x*x;
gphi[n+8] = -9.0/2.0*x+27.0*x*y;
gphi[n+7] = -9.0/2.0+9.0/2.0*x+36.0*y-27.0*x*y-81.0/2.0*y*y;
gphi[n+4] = 9.0-45.0/2.0*x-45.0*y+27.0/2.0*x*x+54.0*x*y+81.0/2.0*y*y;
gphi[n+1] = -45.0/2.0*x+27.0*x*x+27.0*x*y;
gphi[n+2] = 9.0/2.0*x-27.0/2.0*x*x;
gphi[n+5] = 27.0*x-27.0*x*x-54.0*x*y;
break;
default:
return hipErrorNotSupported;
break;
}
return hipSuccess;
}
|
9925b8aef812ee162b69d1fe64599204f2a93d0b.cu
|
/* This source file contains the methods for the DG_Basis structs
*
* Author: Guodong Chen
* Email: [email protected]
* Last modified: 12/05/2019
*/
#include "stdlib.h"
#include "CUDA_Helper.cuh"
#include "DG_Quad.cuh"
#include "DG_Basis.cuh"
/* initialize the Basis struct */
cudaError_t initBasisData(DG_BasisData *BasisData)
{
BasisData->order = 0;
BasisData->np = 0;
BasisData->nq1 = 0;
BasisData->sq = NULL;
BasisData->wq1 = NULL;
BasisData->EdgePhiL = NULL;
BasisData->EdgePhiR = NULL;
BasisData->nq2 = 0;
BasisData->xyq = NULL;
BasisData->wq2 = NULL;
BasisData->Phi = NULL;
BasisData->GPhix = NULL;
BasisData->GPhiy = NULL;
return cudaSuccess;
}
/* allocates and initializes the DG_Basis */
cudaError_t createBasisData(DG_BasisData **pBasisData)
{
CUDA_CALL(cudaMallocManaged(pBasisData, sizeof(DG_BasisData)));
CUDA_CALL(initBasisData(*pBasisData));
return cudaSuccess;
}
/* free the memory of the Basis Data */
cudaError_t freeBasisData(DG_BasisData *BasisData)
{
CUDA_CALL(cudaFree(BasisData->sq));
CUDA_CALL(cudaFree(BasisData->wq1));
CUDA_CALL(cudaFree(BasisData->xyq));
CUDA_CALL(cudaFree(BasisData->wq2));
CUDA_CALL(cudaFree(BasisData->Phi));
CUDA_CALL(cudaFree(BasisData->GPhix));
CUDA_CALL(cudaFree(BasisData->GPhiy));
CUDA_CALL(cudaFree(BasisData->EdgePhiL[0]));
CUDA_CALL(cudaFree(BasisData->EdgePhiR[0]));
CUDA_CALL(cudaFree(BasisData->EdgePhiL));
CUDA_CALL(cudaFree(BasisData->EdgePhiR));
CUDA_CALL(cudaFree(BasisData));
return cudaSuccess;
}
/* compute the Basis data, allocate and fill in the members of BasisData */
cudaError_t computeBasisData(int p, DG_BasisData *BasisData)
{
BasisData->order = p; // Basis order
BasisData->np = (p+1)*(p+2)/2; // number of degrees of freedom, np
// Get Edge Quad Points, integrates up to order 2p+1
CUDA_CALL(DG_QuadLine(2*p+1, &(BasisData->nq1), &(BasisData->sq), &(BasisData->wq1)));
// Get 2d Quad Points, integrates up to order 2p+1
CUDA_CALL(DG_QuadTriangle(2*p+1, &(BasisData->nq2), &(BasisData->xyq), &(BasisData->wq2)));
int i, j, edge, np, nq1, nq2;
// get the gradients of the basis function
np = BasisData->np;
nq1 = BasisData->nq1;
nq2 = BasisData->nq2;
double xy[2];
double GPhi[2*np];
CUDA_CALL(cudaMallocManaged(&(BasisData->Phi), nq2*np*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(BasisData->GPhix), nq2*np*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(BasisData->GPhiy), nq2*np*sizeof(double)));
// evaluate the Phi and Gphi at the quad points
for (i=0; i<nq2; i++){
xy[0] = BasisData->xyq[2*i];
xy[1] = BasisData->xyq[2*i+1];
DG_TriLagrange(p, xy, BasisData->Phi+i*np);
DG_Grad_TriLagrange(p, xy, GPhi);
for (j=0; j<np; j++){
BasisData->GPhix[i*np+j] = GPhi[j];
BasisData->GPhiy[i*np+j] = GPhi[np+j];
}
}
// the edge basis for left and right element
double *tempL, *tempR;
CUDA_CALL(cudaMallocManaged(&(BasisData->EdgePhiL), 3*sizeof(double *)));
CUDA_CALL(cudaMallocManaged(&tempL, 3*nq1*np*sizeof(double)));
CUDA_CALL(cudaMallocManaged(&(BasisData->EdgePhiR), 3*sizeof(double *)));
CUDA_CALL(cudaMallocManaged(&tempR, 3*nq1*np*sizeof(double)));
// evaluate the basis at the edge nodes
for (edge=0; edge<3; edge++){
BasisData->EdgePhiL[edge] = tempL + edge*nq1*np;
BasisData->EdgePhiR[edge] = tempR + edge*nq1*np;
for (i=0; i<nq1; i++){
RefEdge2Elem(edge, xy, BasisData->sq[i]);
DG_TriLagrange(p, xy, BasisData->EdgePhiL[edge]+i*np);
RefEdge2Elem(edge, xy, 1.0-BasisData->sq[i]);
DG_TriLagrange(p, xy, BasisData->EdgePhiR[edge]+i*np);
}
}
return cudaSuccess;
}
/* map 1d edge nodes to elem 2d coords */
cudaError_t RefEdge2Elem(const int edge, double *xy, const double sq)
{
switch (edge){
case 0:
xy[0] = 1.0-sq;
xy[1] = sq;
break;
case 1:
xy[0] = 0.0;
xy[1] = 1.0-sq;
break;
case 2:
xy[0] = sq;
xy[1] = 0.0;
break;
default:
return cudaErrorNotSupported;
break;
}
return cudaSuccess;
}
/* evaluate basis function at xy */
cudaError_t DG_TriLagrange(int p, const double *xy, double *phi)
{
double x, y;
x = xy[0];
y = xy[1];
switch (p) {
case 0:
phi[0] = 1.0;
return cudaSuccess;
break;
case 1:
phi[0] = 1-x-y;
phi[1] = x ;
phi[2] = y;
return cudaSuccess;
break;
case 2:
phi[0] = 1.0-3.0*x-3.0*y+2.0*x*x+4.0*x*y+2.0*y*y;
phi[2] = -x+2.0*x*x;
phi[5] = -y+2.0*y*y;
phi[4] = 4.0*x*y;
phi[3] = 4.0*y-4.0*x*y-4.0*y*y;
phi[1] = 4.0*x-4.0*x*x-4.0*x*y;
return cudaSuccess;
break;
case 3:
phi[0] = 1.0-11.0/2.0*x-11.0/2.0*y+9.0*x*x+18.0*x*y+9.0*y*y-9.0/2.0*x*x*x-27.0/2.0*x*x*y-27.0/2.0*x*y*y-9.0/2.0*y*y*y;
phi[3] = x-9.0/2.0*x*x+9.0/2.0*x*x*x;
phi[9] = y-9.0/2.0*y*y+9.0/2.0*y*y*y;
phi[6] = -9.0/2.0*x*y+27.0/2.0*x*x*y;
phi[8] = -9.0/2.0*x*y+27.0/2.0*x*y*y;
phi[7] = -9.0/2.0*y+9.0/2.0*x*y+18.0*y*y-27.0/2.0*x*y*y-27.0/2.0*y*y*y;
phi[4] = 9.0*y-45.0/2.0*x*y-45.0/2.0*y*y+27.0/2.0*x*x*y+27.0*x*y*y+27.0/2.0*y*y*y;
phi[1] = 9.0*x-45.0/2.0*x*x-45.0/2.0*x*y+27.0/2.0*x*x*x+27.0*x*x*y+27.0/2.0*x*y*y;
phi[2] = -9.0/2.0*x+18.0*x*x+9.0/2.0*x*y-27.0/2.0*x*x*x-27.0/2.0*x*x*y;
phi[5] = 27.0*x*y-27.0*x*x*y-27.0*x*y*y;
return cudaSuccess;
break;
default:
return cudaErrorNotSupported;
break;
}
return cudaSuccess;
}
// gradients of basis functions at reference elements
cudaError_t DG_Grad_TriLagrange(int p, const double *xy, double *gphi)
{
double x, y;
int n = (p+1)*(p+2)/2;
x = xy[0];
y = xy[1];
switch (p){
case 0:
gphi[0] = 0.0;
gphi[n+0] = 0.0;
break;
case 1:
gphi[0] = -1.0;
gphi[1] = 1.0;
gphi[2] = 0.0;
gphi[n+0] = -1.0;
gphi[n+1] = 0.0;
gphi[n+2] = 1.0;
break;
case 2:
gphi[0] = -3.0+4.0*x+4.0*y;
gphi[2] = -1.0+4.0*x;
gphi[5] = 0.0;
gphi[4] = 4.0*y;
gphi[3] = -4.0*y;
gphi[1] = 4.0-8.0*x-4.0*y;
gphi[n+0] = -3.0+4.0*x+4.0*y;
gphi[n+2] = 0.0;
gphi[n+5] = -1.0+4.0*y;
gphi[n+4] = 4.0*x;
gphi[n+3] = 4.0-4.0*x-8.0*y;
gphi[n+1] = -4.0*x;
break;
case 3:
gphi[0] = -11.0/2.0+18.0*x+18.0*y-27.0/2.0*x*x-27.0*x*y-27.0/2.0*y*y;
gphi[3] = 1.0-9.0*x+27.0/2.0*x*x;
gphi[9] = 0.0;
gphi[6] = -9.0/2.0*y+27.0*x*y;
gphi[8] = -9.0/2.0*y+27.0/2.0*y*y;
gphi[7] = 9.0/2.0*y-27.0/2.0*y*y;
gphi[4] = -45.0/2.0*y+27.0*x*y+27.0*y*y;
gphi[1] = 9.0-45.0*x-45.0/2.0*y+81.0/2.0*x*x+54.0*x*y+27.0/2.0*y*y;
gphi[2] = -9.0/2.0+36.0*x+9.0/2.0*y-81.0/2.0*x*x-27.0*x*y;
gphi[5] = 27.0*y-54.0*x*y-27.0*y*y;
gphi[n+0] = -11.0/2.0+18.0*x+18.0*y-27.0/2.0*x*x-27.0*x*y-27.0/2.0*y*y;
gphi[n+3] = 0.0;
gphi[n+9] = 1.0-9.0*y+27.0/2.0*y*y;
gphi[n+6] = -9.0/2.0*x+27.0/2.0*x*x;
gphi[n+8] = -9.0/2.0*x+27.0*x*y;
gphi[n+7] = -9.0/2.0+9.0/2.0*x+36.0*y-27.0*x*y-81.0/2.0*y*y;
gphi[n+4] = 9.0-45.0/2.0*x-45.0*y+27.0/2.0*x*x+54.0*x*y+81.0/2.0*y*y;
gphi[n+1] = -45.0/2.0*x+27.0*x*x+27.0*x*y;
gphi[n+2] = 9.0/2.0*x-27.0/2.0*x*x;
gphi[n+5] = 27.0*x-27.0*x*x-54.0*x*y;
break;
default:
return cudaErrorNotSupported;
break;
}
return cudaSuccess;
}
|
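Neither listing shows how the three entry points are meant to be combined. A minimal driver against the CUDA version, assuming only what the file itself declares (createBasisData, computeBasisData, freeBasisData, and the CUDA_CALL macro pulled in from CUDA_Helper.cuh); the order p = 2 is an arbitrary example:
#include "CUDA_Helper.cuh"
#include "DG_Basis.cuh"
int main()
{
    DG_BasisData *BasisData = NULL;
    CUDA_CALL(createBasisData(&BasisData));    // managed allocation plus zero initialization
    CUDA_CALL(computeBasisData(2, BasisData)); // quadrature rules and basis tables for p = 2
    // BasisData->Phi, BasisData->GPhix, BasisData->GPhiy and the edge tables now live in
    // unified memory and can be read from host or device code.
    CUDA_CALL(freeBasisData(BasisData));
    return 0;
}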
fdb8b7531c18626200ff5c687c507ac3e3312c23.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialMaxPooling.cu"
#else
#include <THHUNN/common.h>
void THNN_(SpatialMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateOutput)(
state, input, output, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
void THNN_(SpatialMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput)(
state, input, gradOutput, gradInput, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
#endif
|
fdb8b7531c18626200ff5c687c507ac3e3312c23.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialMaxPooling.cu"
#else
#include <THCUNN/common.h>
void THNN_(SpatialMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateOutput)(
state, input, output, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
void THNN_(SpatialMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput)(
state, input, gradOutput, gradInput, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
#endif
|
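Both wrappers above forward to the dilated variant with dilationW = dilationH = 1. A small host-side reference, written only to illustrate why dilation 1 reduces to plain max pooling; the sizes and the pool_at helper are illustrative and not part of THCUNN:
#include <vector>
#include <algorithm>
#include <cfloat>
#include <cstdio>
// Max over one kH x kW window of a single-channel H x W image, with stride (dH, dW),
// padding (padH, padW) and dilation (dilH, dilW).
float pool_at(const std::vector<float>& in, int H, int W, int oh, int ow,
              int kH, int kW, int dH, int dW, int padH, int padW, int dilH, int dilW) {
    float best = -FLT_MAX;
    for (int i = 0; i < kH; ++i)
        for (int j = 0; j < kW; ++j) {
            int h = oh * dH - padH + i * dilH;   // dilation stretches the window; 1 keeps it dense
            int w = ow * dW - padW + j * dilW;
            if (h >= 0 && h < H && w >= 0 && w < W)
                best = std::max(best, in[h * W + w]);
        }
    return best;
}
int main() {
    const int H = 4, W = 4;
    std::vector<float> in(H * W);
    for (int i = 0; i < H * W; ++i) in[i] = (float)i;
    // kernel 2x2, stride 2, no padding, dilation 1 -> ordinary 2x2 max pooling (prints 5 and 15)
    std::printf("%.0f %.0f\n", pool_at(in, H, W, 0, 0, 2, 2, 2, 2, 0, 0, 1, 1),
                               pool_at(in, H, W, 1, 1, 2, 2, 2, 2, 0, 0, 1, 1));
    return 0;
}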
0f76c4464d00370d6791b9ff6fefb3a3a029981f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = ::exp(b * beta);
return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z;
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::hip::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::hip::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
// computes `result = self <= threshold ? value : other`
// other is `self` in threshold() and `grad` in threshold_backward()
static Tensor threshold_out_cuda(
optional<Tensor> opt_result,
const Tensor& self,
const Scalar& threshold,
const Scalar& value,
const Tensor& other) {
Tensor result = opt_result.value_or(Tensor());
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false) // threshold is idempotent, so overlap is okay
.add_output(result)
.add_input(self)
.add_input(other)
.allow_cpu_scalars(true)
.promote_inputs_to_common_dtype(true)
.cast_common_dtype_to_outputs(true)
.enforce_safe_casting_to_output(true)
.build();
threshold_kernel(iter, threshold, value);
return iter.output();
}
Tensor threshold_cuda(const Tensor& self, const Scalar& threshold, const Scalar& value) {
return threshold_out_cuda(nullopt, self, threshold, value, self);
}
Tensor& threshold__cuda(Tensor& self, const Scalar& threshold, const Scalar& value) {
threshold_out_cuda(make_optional(self), self, threshold, value, self);
return self;
}
Tensor& threshold_out_cuda(const Tensor& self, const Scalar& threshold, const Scalar& value, Tensor& result) {
threshold_out_cuda(make_optional(result), self, threshold, value, self);
return result;
}
Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, const Scalar& threshold) {
return threshold_out_cuda(nullopt, self, threshold, 0, grad);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
} // namespace native
} // namespace at
|
0f76c4464d00370d6791b9ff6fefb3a3a029981f.cu
|
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = std::exp(b * beta);
return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z;
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::cuda::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
// computes `result = self <= threshold ? value : other`
// other is `self` in threshold() and `grad` in threshold_backward()
static Tensor threshold_out_cuda(
optional<Tensor> opt_result,
const Tensor& self,
const Scalar& threshold,
const Scalar& value,
const Tensor& other) {
Tensor result = opt_result.value_or(Tensor());
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false) // threshold is idempotent, so overlap is okay
.add_output(result)
.add_input(self)
.add_input(other)
.allow_cpu_scalars(true)
.promote_inputs_to_common_dtype(true)
.cast_common_dtype_to_outputs(true)
.enforce_safe_casting_to_output(true)
.build();
threshold_kernel(iter, threshold, value);
return iter.output();
}
Tensor threshold_cuda(const Tensor& self, const Scalar& threshold, const Scalar& value) {
return threshold_out_cuda(nullopt, self, threshold, value, self);
}
Tensor& threshold__cuda(Tensor& self, const Scalar& threshold, const Scalar& value) {
threshold_out_cuda(make_optional(self), self, threshold, value, self);
return self;
}
Tensor& threshold_out_cuda(const Tensor& self, const Scalar& threshold, const Scalar& value, Tensor& result) {
threshold_out_cuda(make_optional(result), self, threshold, value, self);
return result;
}
Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, const Scalar& threshold) {
return threshold_out_cuda(nullopt, self, threshold, 0, grad);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
} // namespace native
} // namespace at
|
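The multi-weight PReLU kernels above recover the channel index from a flat offset via (linearId % input_stride0) / input_stride1; for a contiguous NCHW tensor input_stride0 is C*H*W and input_stride1 is H*W, so the modulo strips the batch component and the division counts whole channels. A small host-side check of that arithmetic with made-up sizes (the values are not taken from the file):
#include <cstdio>
int main()
{
    const long N = 2, C = 3, H = 4, W = 5;
    const long input_stride0 = C * H * W;   // elements per sample in a contiguous NCHW tensor
    const long input_stride1 = H * W;       // elements per channel
    for (long linearId = 0; linearId < N * C * H * W; ++linearId) {
        long channel  = (linearId % input_stride0) / input_stride1;  // formula used by the kernels
        long expected = (linearId / input_stride1) % C;              // independent derivation
        if (channel != expected) { std::printf("mismatch at %ld\n", linearId); return 1; }
    }
    std::printf("channel index formula verified for %ld elements\n", N * C * H * W);
    return 0;
}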
e16dda4a5252ad5efca7edf03b2d41682f4c696e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <vector>
#include "type.h"
#include "tree.h"
#include "kernels.h"
#include "timer.h"
#include "dtt.h"
#include "spharm_gpu.h"
#include "cuda_utils.h"
void dual_tree_traversal_core(t_fmm_params* params, t_node* target, t_node* source,
std::vector<std::vector<int>>& vec_p2p_interactions, std::vector<std::vector<int>>& vec_m2l_interactions)
{
TYPE dx = source->center[0] - target->center[0];
TYPE dy = source->center[1] - target->center[1];
TYPE dz = source->center[2] - target->center[2];
TYPE r2 = dx*dx + dy*dy + dz*dz;
TYPE d1 = source->rad*2.0;
TYPE d2 = target->rad*2.0;
if ((d1+d2)*(d1+d2) < params->theta2*r2)
{
//m2l(params, target, source);
vec_m2l_interactions[target->offset].push_back(source->offset);
}
else if (is_leaf(source) && is_leaf(target))
{
//p2p(params, target, source);
vec_p2p_interactions[target->offset].push_back(source->offset);
}
else
{
TYPE target_sz = target->rad;
TYPE source_sz = source->rad;
if (is_leaf(source) || (target_sz >= source_sz && !is_leaf(target)))
{
for (size_t i = 0; i < target->num_children; ++i)
dual_tree_traversal_core(params, get_node(params, target->child[i]), source, vec_p2p_interactions, vec_m2l_interactions);
}
else
{
for (size_t i = 0; i < source->num_children; ++i)
//dual_tree_traversal_core(params, target, source->child[i]);
dual_tree_traversal_core(params, target, get_node(params, source->child[i]), vec_p2p_interactions, vec_m2l_interactions);
}
}
}
__global__
void direct_gpu(
TYPE* d_x, TYPE* d_y, TYPE* d_z, TYPE* d_w,
TYPE* d_ax, TYPE* d_ay, TYPE* d_az, TYPE* d_p, int n)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= n) return;
TYPE xi = d_x[i];
TYPE yi = d_y[i];
TYPE zi = d_z[i];
TYPE ax = 0.0f;
TYPE ay = 0.0f;
TYPE az = 0.0f;
TYPE p = 0.0f;
for (int j = 0; j < n; ++j)
{
TYPE dx = d_x[j] - xi;
TYPE dy = d_y[j] - yi;
TYPE dz = d_z[j] - zi;
TYPE wj = d_w[j];
TYPE r = dx*dx + dy*dy + dz*dz;
TYPE inv_r = (r == 0.0f) ? 0.0f : rsqrtf(r);
TYPE inv_r_3 = inv_r*inv_r*inv_r;
ax += dx*wj*inv_r_3;
ay += dy*wj*inv_r_3;
az += dz*wj*inv_r_3;
p += wj*inv_r;
}
d_ax[i] += ax;
d_ay[i] += ay;
d_az[i] += az;
d_p[i] += p;
}
__global__
void gpu_p2p_interactions(
t_node* d_nodes,
TYPE* d_x, TYPE* d_y, TYPE* d_z, TYPE* d_w,
TYPE* d_ax, TYPE* d_ay, TYPE* d_az, TYPE* d_p,
int* d_p2p_interactions, int* d_p2p_sizes, int* d_p2p_offsets,
size_t num_nodes)
{
long wid = threadIdx.x/32;
long lane = threadIdx.x % 32;
const long num_warps = 256/32;
long target_offset = blockIdx.x*(blockDim.x/32)+wid;
if (target_offset >= num_nodes) return;
t_node* target = &d_nodes[target_offset];
size_t tidx = target->point_idx;
const TYPE* tx = &d_x[tidx];
const TYPE* ty = &d_y[tidx];
const TYPE* tz = &d_z[tidx];
TYPE* tax = &d_ax[tidx];
TYPE* tay = &d_ay[tidx];
TYPE* taz = &d_az[tidx];
TYPE* tp = &d_p[tidx];
const long shmem_sz = 256;
__shared__ float4 shmem_base[shmem_sz*num_warps];
float4* shmem = &shmem_base[wid*shmem_sz];
const int interaction_offset = d_p2p_offsets[target_offset];
const int* interaction_list = &d_p2p_interactions[interaction_offset];
const int num_interacts = d_p2p_sizes[target_offset];
const long num_target_points = target->num_points;
for (size_t ii = lane; ii < target->num_points+31; ii += 32)
{
TYPE ax = 0.0, ay = 0.0, az = 0.0, p = 0.0;
const TYPE xi = (ii >= num_target_points) ? 0.0f : tx[ii];
const TYPE yi = (ii >= num_target_points) ? 0.0f : ty[ii];
const TYPE zi = (ii >= num_target_points) ? 0.0f : tz[ii];
for (size_t j = 0; j < num_interacts; ++j)
{
int source_offset = interaction_list[j];
t_node* source = &d_nodes[source_offset];
size_t sidx = source->point_idx;
const TYPE* sx = &d_x[sidx];
const TYPE* sy = &d_y[sidx];
const TYPE* sz = &d_z[sidx];
const TYPE* sw = &d_w[sidx];
const size_t num_source_points = source->num_points;
for (size_t jb = 0; jb < num_source_points; jb += shmem_sz)
{
#pragma unroll 32
for (size_t jj = lane; jj < shmem_sz; jj += 32)
{
if (jj+jb >= num_source_points) break;
shmem[jj].x = sx[jj+jb];
shmem[jj].y = sy[jj+jb];
shmem[jj].z = sz[jj+jb];
shmem[jj].w = sw[jj+jb];
}
#pragma unroll 32
for (size_t jj = 0; jj < shmem_sz; ++jj)
{
if (jj+jb >= num_source_points) break;
TYPE dx = shmem[jj].x - xi;
TYPE dy = shmem[jj].y - yi;
TYPE dz = shmem[jj].z - zi;
TYPE wj = shmem[jj].w;
TYPE r = dx*dx + dy*dy + dz*dz;
TYPE inv_r = (r == 0.0f) ? 0.0f : rsqrtf(r);
TYPE inv_r_3 = inv_r*inv_r*inv_r;
ax += dx*wj*inv_r_3;
ay += dy*wj*inv_r_3;
az += dz*wj*inv_r_3;
p += wj*inv_r;
}
}
}
if (ii >= num_target_points) break;
tax[ii] += ax;
tay[ii] += ay;
taz[ii] += az;
tp[ii] += p;
}
}
#define S_IDX(n,m) ((n)*(n)+(n)+(m))
__device__
void cart_to_sph(TYPE x, TYPE y, TYPE z, TYPE* pr, TYPE* ptheta, TYPE* pphi)
{
*pr = TYPE_SQRT(x*x+y*y+z*z);
*ptheta = (*pr == TYPE_ZERO) ? TYPE_ZERO : TYPE_ACOS(z/(*pr));
*pphi = TYPE_ATAN2(y, x);
}
__global__
void gpu_m2l_interactions(
t_node* d_nodes,
TYPE* d_m_real, TYPE* d_m_imag, TYPE* d_l_real, TYPE* d_l_imag,
int* d_m2l_interactions, int* d_m2l_sizes, int* d_m2l_offsets,
size_t num_nodes)
{
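// One warp per target node, but only lane 0 does the work: for every source node in
// the M2L list, convert the centre-to-centre vector to spherical coordinates, evaluate
// the outer expansion into this warp's shared-memory scratch space, and translate the
// source multipole coefficients (M) into the target's local expansion (L), truncated
// at num_terms = 4.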
long wid = threadIdx.x/32;
long lane = threadIdx.x % 32;
const long num_warps = 256/32;
long target_offset = blockIdx.x*(blockDim.x/32)+wid;
if (target_offset >= num_nodes) return;
t_node* target = &d_nodes[target_offset];
size_t tidx = target->mult_idx;
TYPE* L_real = &d_l_real[tidx];
TYPE* L_imag = &d_l_imag[tidx];
const int num_terms = 4;
__shared__ TYPE outer_real[num_warps*num_terms*num_terms];
__shared__ TYPE outer_imag[num_warps*num_terms*num_terms];
const int num_ints = d_m2l_sizes[target_offset];
const int interact_offset = d_m2l_offsets[target_offset];
const int* interact_list = &d_m2l_interactions[interact_offset];
size_t woff = wid*num_terms*num_terms;
//if (lane == 0) printf("target %ld has %ld interactions\n", target_offset, num_ints);
if (lane != 0) return;
//if (lane == 0)
for (size_t i = 0; i < num_ints; ++i)
{
size_t source_offset = interact_list[i];
t_node* source = &d_nodes[source_offset];
TYPE dx = target->center[0] - source->center[0];
TYPE dy = target->center[1] - source->center[1];
TYPE dz = target->center[2] - source->center[2];
size_t sidx = source->mult_idx;
TYPE* M_real = &d_m_real[sidx];
TYPE* M_imag = &d_m_imag[sidx];
TYPE rho, alpha, beta;
cart_to_sph(dx, dy, dz, &rho, &alpha, &beta);
compute_outer_gpu(num_terms, rho, alpha, beta,
&outer_real[wid*num_terms*num_terms], &outer_imag[wid*num_terms*num_terms]);
for (int j = 0; j < num_terms; ++j)
{
for (int k = -j; k <= j; ++k)
{
TYPE tmp_real = TYPE_ZERO;
TYPE tmp_imag = TYPE_ZERO;
for (int n = 0; n < num_terms-j; ++n)
{
for (int m = -n; m <= n; ++m)
{
tmp_real += M_real[S_IDX(n,m)]*outer_real[woff+S_IDX(j+n,-k-m)] -
M_imag[S_IDX(n,m)]*outer_imag[woff+S_IDX(j+n,-k-m)];
tmp_imag += M_real[S_IDX(n,m)]*outer_imag[woff+S_IDX(j+n,-k-m)] +
M_imag[S_IDX(n,m)]*outer_real[woff+S_IDX(j+n,-k-m)];
}
}
L_real[S_IDX(j,k)] += tmp_real;
L_imag[S_IDX(j,k)] += tmp_imag;
}
}
}
}
void dual_tree_traversal(t_fmm_params* params)
{
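// Host driver: upload particles, accelerations and multipole/local expansions, build
// the P2P and M2L interaction lists with a recursive dual tree traversal on the host,
// flatten the lists for the GPU, launch the warp-per-node P2P and M2L kernels, then
// copy accelerations, potentials and local expansions back.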
t_node* d_nodes;
TYPE* d_x;
TYPE* d_y;
TYPE* d_z;
TYPE* d_w;
TYPE* d_m_real;
TYPE* d_m_imag;
TYPE* d_l_real;
TYPE* d_l_imag;
TYPE* d_ax;
TYPE* d_ay;
TYPE* d_az;
TYPE* d_p;
size_t np = params->num_points;
CUDACHK(hipMalloc((void**)&d_x, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_y, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_z, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_w, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_ax, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_ay, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_az, sizeof(TYPE)*np));
CUDACHK(hipMalloc((void**)&d_p, sizeof(TYPE)*np));
size_t nm = params->num_multipoles*params->num_nodes;
CUDACHK(hipMalloc((void**)&d_m_real, sizeof(TYPE)*nm));
CUDACHK(hipMalloc((void**)&d_m_imag, sizeof(TYPE)*nm));
CUDACHK(hipMalloc((void**)&d_l_real, sizeof(TYPE)*nm));
CUDACHK(hipMalloc((void**)&d_l_imag, sizeof(TYPE)*nm));
CUDACHK(hipMalloc((void**)&d_nodes, sizeof(t_node)*params->num_nodes));
CUDACHK(hipMemcpy(d_x, &params->x[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_y, &params->y[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_z, &params->z[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_w, &params->w[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_ax, &params->ax[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_ay, &params->ay[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_az, &params->az[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_p, &params->p[0], sizeof(TYPE)*np, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_l_real, &params->L_array_real[0], sizeof(TYPE)*nm, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_l_imag, &params->L_array_imag[0], sizeof(TYPE)*nm, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_m_real, &params->M_array_real[0], sizeof(TYPE)*nm, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_m_imag, &params->M_array_imag[0], sizeof(TYPE)*nm, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_nodes, &params->node_array[0], sizeof(t_node)*params->num_nodes, hipMemcpyHostToDevice));
init_spharm_gpu(params);
//size_t* p2p_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes*params->num_nodes);
//size_t* num_p2p_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes);
//size_t* m2l_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes*params->num_nodes);
//size_t* num_m2l_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes);
//size_t* d_p2p_interactions;
//size_t* d_num_p2p_interactions;
//size_t* d_m2l_interactions;
//size_t* d_num_m2l_interactions;
//CUDACHK(hipMalloc((void**)&d_p2p_interactions, sizeof(size_t)*nn*nn));
//CUDACHK(hipMalloc((void**)&d_num_p2p_interactions, sizeof(size_t)*nn));
//CUDACHK(hipMalloc((void**)&d_m2l_interactions, sizeof(size_t)*nn*nn));
//CUDACHK(hipMalloc((void**)&d_num_m2l_interactions, sizeof(size_t)*nn));
size_t nn = params->num_nodes;
std::vector<std::vector<int>> vec_p2p_interactions(nn, std::vector<int>());
std::vector<std::vector<int>> vec_m2l_interactions(nn, std::vector<int>());
t_timer dtt_timer;
start(&dtt_timer);
dual_tree_traversal_core(params, get_node(params, params->root), get_node(params, params->root), vec_p2p_interactions, vec_m2l_interactions);
stop(&dtt_timer);
t_timer transfer_timer;
start(&transfer_timer);
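// Flatten the per-node interaction vectors into contiguous arrays: *_offsets[i] gives
// the start of node i's list, *_sizes[i] its length, and *_interactions holds the
// concatenated source-node indices (a CSR-style layout the kernels index directly).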
int* h_p2p_sizes = (int*)malloc(sizeof(int)*nn);
int* h_m2l_sizes = (int*)malloc(sizeof(int)*nn);
int* h_p2p_offsets = (int*)malloc(sizeof(int)*nn);
int* h_m2l_offsets = (int*)malloc(sizeof(int)*nn);
size_t tot_p2p = 0, tot_m2l = 0;
for (int i = 0; i < params->num_nodes; ++i)
{
int num_p2p = vec_p2p_interactions[i].size();
int num_m2l = vec_m2l_interactions[i].size();
h_p2p_offsets[i] = tot_p2p;
h_m2l_offsets[i] = tot_m2l;
tot_p2p += num_p2p;
tot_m2l += num_m2l;
}
int* h_p2p_interactions = (int*)malloc(sizeof(int)*tot_p2p);
int* h_m2l_interactions = (int*)malloc(sizeof(int)*tot_m2l);
for (int i = 0; i < params->num_nodes; ++i)
{
int num_p2p = vec_p2p_interactions[i].size();
int num_m2l = vec_m2l_interactions[i].size();
int p2p_offset = h_p2p_offsets[i];
int m2l_offset = h_m2l_offsets[i];
h_p2p_sizes[i] = num_p2p;
h_m2l_sizes[i] = num_m2l;
for (int j = 0; j < num_p2p; ++j)
h_p2p_interactions[p2p_offset+j] = vec_p2p_interactions[i][j];
for (int j = 0; j < num_m2l; ++j)
h_m2l_interactions[m2l_offset+j] = vec_m2l_interactions[i][j];
}
int* d_p2p_sizes;
int* d_m2l_sizes;
int* d_p2p_offsets;
int* d_m2l_offsets;
int* d_p2p_interactions;
int* d_m2l_interactions;
CUDACHK(hipMalloc((void**)&d_p2p_sizes, sizeof(int)*nn));
CUDACHK(hipMalloc((void**)&d_m2l_sizes, sizeof(int)*nn));
CUDACHK(hipMalloc((void**)&d_p2p_offsets, sizeof(int)*nn));
CUDACHK(hipMalloc((void**)&d_m2l_offsets, sizeof(int)*nn));
CUDACHK(hipMalloc((void**)&d_p2p_interactions, sizeof(int)*tot_p2p));
CUDACHK(hipMalloc((void**)&d_m2l_interactions, sizeof(int)*tot_m2l));
CUDACHK(hipMemcpy(d_p2p_sizes, h_p2p_sizes, sizeof(int)*nn, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_m2l_sizes, h_m2l_sizes, sizeof(int)*nn, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_p2p_offsets, h_p2p_offsets, sizeof(int)*nn, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_m2l_offsets, h_m2l_offsets, sizeof(int)*nn, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_p2p_interactions, h_p2p_interactions, sizeof(int)*tot_p2p, hipMemcpyHostToDevice));
CUDACHK(hipMemcpy(d_m2l_interactions, h_m2l_interactions, sizeof(int)*tot_m2l, hipMemcpyHostToDevice));
stop(&transfer_timer);
//direct_gpu<<<num_blocks, block_sz>>>(d_x, d_y, d_z, d_w, d_ax, d_ay, d_az, d_p, np);
//CUDACHK(hipMemcpy(d_p2p_interactions, p2p_interactions, sizeof(size_t)*nn*nn, hipMemcpyHostToDevice));
//CUDACHK(hipMemcpy(d_num_p2p_interactions, num_p2p_interactions, sizeof(size_t)*nn, hipMemcpyHostToDevice));
//CUDACHK(hipMemcpy(d_m2l_interactions, m2l_interactions, sizeof(size_t)*nn*nn, hipMemcpyHostToDevice));
//CUDACHK(hipMemcpy(d_num_m2l_interactions, num_m2l_interactions, sizeof(size_t)*nn, hipMemcpyHostToDevice));
const int block_sz = 256;
const int num_blocks = nn/4 + ((nn % 4) ? 1 : 0);
t_timer timer;
if(params->dodebug)
start(&timer);
hipLaunchKernelGGL(( gpu_p2p_interactions), dim3(num_blocks), dim3(block_sz), 0, 0, d_nodes, d_x, d_y, d_z, d_w, d_ax, d_ay, d_az, d_p, d_p2p_interactions, d_p2p_sizes, d_p2p_offsets, nn);
hipLaunchKernelGGL(( gpu_m2l_interactions), dim3(num_blocks), dim3(block_sz), 0, 0, d_nodes, d_m_real, d_m_imag, d_l_real, d_l_imag, d_m2l_interactions, d_m2l_sizes, d_m2l_offsets, nn);
CUDACHK(hipPeekAtLastError());
CUDACHK(hipDeviceSynchronize());
if(params->dodebug)
{
stop(&timer);
printf("tot p2p = %zu, tot_m2l = %zu\n", tot_p2p, tot_m2l);
printf("total memory to allocate to GPU = %zu MB\n", (sizeof(int)*(size_t)(4*nn + tot_p2p + tot_m2l))/1024/1024);
printf("----------\n");
printf("GPU elapsed time = %f\n", timer.elapsed);
printf("DTT elapsed time = %f\n", dtt_timer.elapsed);
printf("MEM elapsed time = %f\n", transfer_timer.elapsed);
printf("Total elapsed time = %f\n", timer.elapsed + dtt_timer.elapsed + transfer_timer.elapsed);
printf("----------\n");
}
CUDACHK(hipMemcpy(&params->ax[0], d_ax, sizeof(TYPE)*np, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(&params->ay[0], d_ay, sizeof(TYPE)*np, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(&params->az[0], d_az, sizeof(TYPE)*np, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(&params->p[0] , d_p, sizeof(TYPE)*np, hipMemcpyDeviceToHost));
TYPE* l_real = (TYPE*)malloc(sizeof(TYPE)*nm);
TYPE* l_imag = (TYPE*)malloc(sizeof(TYPE)*nm);
CUDACHK(hipMemcpy(l_real, d_l_real, sizeof(TYPE)*nm, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(l_imag, d_l_imag, sizeof(TYPE)*nm, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(&params->L_array_real[0], d_l_real, sizeof(TYPE)*nm, hipMemcpyDeviceToHost));
CUDACHK(hipMemcpy(&params->L_array_imag[0], d_l_imag, sizeof(TYPE)*nm, hipMemcpyDeviceToHost));
}
|
e16dda4a5252ad5efca7edf03b2d41682f4c696e.cu
|
#include <stdlib.h>
#include <vector>
#include "type.h"
#include "tree.h"
#include "kernels.h"
#include "timer.h"
#include "dtt.h"
#include "spharm_gpu.h"
#include "cuda_utils.h"
void dual_tree_traversal_core(t_fmm_params* params, t_node* target, t_node* source,
std::vector<std::vector<int>>& vec_p2p_interactions, std::vector<std::vector<int>>& vec_m2l_interactions)
{
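// Multipole acceptance criterion (MAC): treat the pair as well separated when the
// squared sum of the two node diameters is below theta^2 times the squared
// centre-to-centre distance, in which case an M2L interaction is recorded; if both
// nodes are leaves a P2P interaction is recorded; otherwise recurse into the larger
// (or the only non-leaf) node.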
TYPE dx = source->center[0] - target->center[0];
TYPE dy = source->center[1] - target->center[1];
TYPE dz = source->center[2] - target->center[2];
TYPE r2 = dx*dx + dy*dy + dz*dz;
TYPE d1 = source->rad*2.0;
TYPE d2 = target->rad*2.0;
if ((d1+d2)*(d1+d2) < params->theta2*r2)
{
//m2l(params, target, source);
vec_m2l_interactions[target->offset].push_back(source->offset);
}
else if (is_leaf(source) && is_leaf(target))
{
//p2p(params, target, source);
vec_p2p_interactions[target->offset].push_back(source->offset);
}
else
{
TYPE target_sz = target->rad;
TYPE source_sz = source->rad;
if (is_leaf(source) || (target_sz >= source_sz && !is_leaf(target)))
{
for (size_t i = 0; i < target->num_children; ++i)
dual_tree_traversal_core(params, get_node(params, target->child[i]), source, vec_p2p_interactions, vec_m2l_interactions);
}
else
{
for (size_t i = 0; i < source->num_children; ++i)
//dual_tree_traversal_core(params, target, source->child[i]);
dual_tree_traversal_core(params, target, get_node(params, source->child[i]), vec_p2p_interactions, vec_m2l_interactions);
}
}
}
__global__
void direct_gpu(
TYPE* d_x, TYPE* d_y, TYPE* d_z, TYPE* d_w,
TYPE* d_ax, TYPE* d_ay, TYPE* d_az, TYPE* d_p, int n)
{
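// Reference O(n^2) direct-summation kernel: one thread per target particle i
// accumulates the 1/r potential and inverse-square accelerations from all n
// particles; the (r == 0) guard zeroes the self-interaction term.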
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= n) return;
TYPE xi = d_x[i];
TYPE yi = d_y[i];
TYPE zi = d_z[i];
TYPE ax = 0.0f;
TYPE ay = 0.0f;
TYPE az = 0.0f;
TYPE p = 0.0f;
for (int j = 0; j < n; ++j)
{
TYPE dx = d_x[j] - xi;
TYPE dy = d_y[j] - yi;
TYPE dz = d_z[j] - zi;
TYPE wj = d_w[j];
TYPE r = dx*dx + dy*dy + dz*dz;
TYPE inv_r = (r == 0.0f) ? 0.0f : rsqrtf(r);
TYPE inv_r_3 = inv_r*inv_r*inv_r;
ax += dx*wj*inv_r_3;
ay += dy*wj*inv_r_3;
az += dz*wj*inv_r_3;
p += wj*inv_r;
}
d_ax[i] += ax;
d_ay[i] += ay;
d_az[i] += az;
d_p[i] += p;
}
__global__
void gpu_p2p_interactions(
t_node* d_nodes,
TYPE* d_x, TYPE* d_y, TYPE* d_z, TYPE* d_w,
TYPE* d_ax, TYPE* d_ay, TYPE* d_az, TYPE* d_p,
int* d_p2p_interactions, int* d_p2p_sizes, int* d_p2p_offsets,
size_t num_nodes)
{
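// One warp per target node: each lane handles target particles ii = lane, lane+32, ...
// while the warp cooperatively stages each source node's particles into a per-warp
// shared-memory tile of shmem_sz float4 entries (x, y, z, w). The loop bound
// num_points+31 keeps exhausted lanes in the loop so they can still help fill the
// tile; lanes past the end of the node skip the final write-back. Note: the tile fill
// and the accumulation loop rely on implicit warp-synchronous execution; on
// architectures with independent thread scheduling a __syncwarp() between them would
// normally be needed.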
long wid = threadIdx.x/32;
long lane = threadIdx.x % 32;
const long num_warps = 256/32;
long target_offset = blockIdx.x*(blockDim.x/32)+wid;
if (target_offset >= num_nodes) return;
t_node* target = &d_nodes[target_offset];
size_t tidx = target->point_idx;
const TYPE* tx = &d_x[tidx];
const TYPE* ty = &d_y[tidx];
const TYPE* tz = &d_z[tidx];
TYPE* tax = &d_ax[tidx];
TYPE* tay = &d_ay[tidx];
TYPE* taz = &d_az[tidx];
TYPE* tp = &d_p[tidx];
const long shmem_sz = 256;
__shared__ float4 shmem_base[shmem_sz*num_warps];
float4* shmem = &shmem_base[wid*shmem_sz];
const int interaction_offset = d_p2p_offsets[target_offset];
const int* interaction_list = &d_p2p_interactions[interaction_offset];
const int num_interacts = d_p2p_sizes[target_offset];
const long num_target_points = target->num_points;
for (size_t ii = lane; ii < target->num_points+31; ii += 32)
{
TYPE ax = 0.0, ay = 0.0, az = 0.0, p = 0.0;
const TYPE xi = (ii >= num_target_points) ? 0.0f : tx[ii];
const TYPE yi = (ii >= num_target_points) ? 0.0f : ty[ii];
const TYPE zi = (ii >= num_target_points) ? 0.0f : tz[ii];
for (size_t j = 0; j < num_interacts; ++j)
{
int source_offset = interaction_list[j];
t_node* source = &d_nodes[source_offset];
size_t sidx = source->point_idx;
const TYPE* sx = &d_x[sidx];
const TYPE* sy = &d_y[sidx];
const TYPE* sz = &d_z[sidx];
const TYPE* sw = &d_w[sidx];
const size_t num_source_points = source->num_points;
for (size_t jb = 0; jb < num_source_points; jb += shmem_sz)
{
#pragma unroll 32
for (size_t jj = lane; jj < shmem_sz; jj += 32)
{
if (jj+jb >= num_source_points) break;
shmem[jj].x = sx[jj+jb];
shmem[jj].y = sy[jj+jb];
shmem[jj].z = sz[jj+jb];
shmem[jj].w = sw[jj+jb];
}
#pragma unroll 32
for (size_t jj = 0; jj < shmem_sz; ++jj)
{
if (jj+jb >= num_source_points) break;
TYPE dx = shmem[jj].x - xi;
TYPE dy = shmem[jj].y - yi;
TYPE dz = shmem[jj].z - zi;
TYPE wj = shmem[jj].w;
TYPE r = dx*dx + dy*dy + dz*dz;
TYPE inv_r = (r == 0.0f) ? 0.0f : rsqrtf(r);
TYPE inv_r_3 = inv_r*inv_r*inv_r;
ax += dx*wj*inv_r_3;
ay += dy*wj*inv_r_3;
az += dz*wj*inv_r_3;
p += wj*inv_r;
}
}
}
if (ii >= num_target_points) break;
tax[ii] += ax;
tay[ii] += ay;
taz[ii] += az;
tp[ii] += p;
}
}
#define S_IDX(n,m) ((n)*(n)+(n)+(m))
__device__
void cart_to_sph(TYPE x, TYPE y, TYPE z, TYPE* pr, TYPE* ptheta, TYPE* pphi)
{
*pr = TYPE_SQRT(x*x+y*y+z*z);
*ptheta = (*pr == TYPE_ZERO) ? TYPE_ZERO : TYPE_ACOS(z/(*pr));
*pphi = TYPE_ATAN2(y, x);
}
__global__
void gpu_m2l_interactions(
t_node* d_nodes,
TYPE* d_m_real, TYPE* d_m_imag, TYPE* d_l_real, TYPE* d_l_imag,
int* d_m2l_interactions, int* d_m2l_sizes, int* d_m2l_offsets,
size_t num_nodes)
{
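// One warp per target node, but only lane 0 does the work: for every source node in
// the M2L list, convert the centre-to-centre vector to spherical coordinates, evaluate
// the outer expansion into this warp's shared-memory scratch space, and translate the
// source multipole coefficients (M) into the target's local expansion (L), truncated
// at num_terms = 4.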
long wid = threadIdx.x/32;
long lane = threadIdx.x % 32;
const long num_warps = 256/32;
long target_offset = blockIdx.x*(blockDim.x/32)+wid;
if (target_offset >= num_nodes) return;
t_node* target = &d_nodes[target_offset];
size_t tidx = target->mult_idx;
TYPE* L_real = &d_l_real[tidx];
TYPE* L_imag = &d_l_imag[tidx];
const int num_terms = 4;
__shared__ TYPE outer_real[num_warps*num_terms*num_terms];
__shared__ TYPE outer_imag[num_warps*num_terms*num_terms];
const int num_ints = d_m2l_sizes[target_offset];
const int interact_offset = d_m2l_offsets[target_offset];
const int* interact_list = &d_m2l_interactions[interact_offset];
size_t woff = wid*num_terms*num_terms;
//if (lane == 0) printf("target %ld has %ld interactions\n", target_offset, num_ints);
if (lane != 0) return;
//if (lane == 0)
for (size_t i = 0; i < num_ints; ++i)
{
size_t source_offset = interact_list[i];
t_node* source = &d_nodes[source_offset];
TYPE dx = target->center[0] - source->center[0];
TYPE dy = target->center[1] - source->center[1];
TYPE dz = target->center[2] - source->center[2];
size_t sidx = source->mult_idx;
TYPE* M_real = &d_m_real[sidx];
TYPE* M_imag = &d_m_imag[sidx];
TYPE rho, alpha, beta;
cart_to_sph(dx, dy, dz, &rho, &alpha, &beta);
compute_outer_gpu(num_terms, rho, alpha, beta,
&outer_real[wid*num_terms*num_terms], &outer_imag[wid*num_terms*num_terms]);
for (int j = 0; j < num_terms; ++j)
{
for (int k = -j; k <= j; ++k)
{
TYPE tmp_real = TYPE_ZERO;
TYPE tmp_imag = TYPE_ZERO;
for (int n = 0; n < num_terms-j; ++n)
{
for (int m = -n; m <= n; ++m)
{
tmp_real += M_real[S_IDX(n,m)]*outer_real[woff+S_IDX(j+n,-k-m)] -
M_imag[S_IDX(n,m)]*outer_imag[woff+S_IDX(j+n,-k-m)];
tmp_imag += M_real[S_IDX(n,m)]*outer_imag[woff+S_IDX(j+n,-k-m)] +
M_imag[S_IDX(n,m)]*outer_real[woff+S_IDX(j+n,-k-m)];
}
}
L_real[S_IDX(j,k)] += tmp_real;
L_imag[S_IDX(j,k)] += tmp_imag;
}
}
}
}
void dual_tree_traversal(t_fmm_params* params)
{
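// Host driver: upload particles, accelerations and multipole/local expansions, build
// the P2P and M2L interaction lists with a recursive dual tree traversal on the host,
// flatten the lists for the GPU, launch the warp-per-node P2P and M2L kernels, then
// copy accelerations, potentials and local expansions back.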
t_node* d_nodes;
TYPE* d_x;
TYPE* d_y;
TYPE* d_z;
TYPE* d_w;
TYPE* d_m_real;
TYPE* d_m_imag;
TYPE* d_l_real;
TYPE* d_l_imag;
TYPE* d_ax;
TYPE* d_ay;
TYPE* d_az;
TYPE* d_p;
size_t np = params->num_points;
CUDACHK(cudaMalloc((void**)&d_x, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_y, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_z, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_w, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_ax, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_ay, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_az, sizeof(TYPE)*np));
CUDACHK(cudaMalloc((void**)&d_p, sizeof(TYPE)*np));
size_t nm = params->num_multipoles*params->num_nodes;
CUDACHK(cudaMalloc((void**)&d_m_real, sizeof(TYPE)*nm));
CUDACHK(cudaMalloc((void**)&d_m_imag, sizeof(TYPE)*nm));
CUDACHK(cudaMalloc((void**)&d_l_real, sizeof(TYPE)*nm));
CUDACHK(cudaMalloc((void**)&d_l_imag, sizeof(TYPE)*nm));
CUDACHK(cudaMalloc((void**)&d_nodes, sizeof(t_node)*params->num_nodes));
CUDACHK(cudaMemcpy(d_x, &params->x[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_y, &params->y[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_z, &params->z[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_w, &params->w[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_ax, &params->ax[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_ay, &params->ay[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_az, &params->az[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_p, &params->p[0], sizeof(TYPE)*np, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_l_real, &params->L_array_real[0], sizeof(TYPE)*nm, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_l_imag, &params->L_array_imag[0], sizeof(TYPE)*nm, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_m_real, &params->M_array_real[0], sizeof(TYPE)*nm, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_m_imag, &params->M_array_imag[0], sizeof(TYPE)*nm, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_nodes, &params->node_array[0], sizeof(t_node)*params->num_nodes, cudaMemcpyHostToDevice));
init_spharm_gpu(params);
//size_t* p2p_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes*params->num_nodes);
//size_t* num_p2p_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes);
//size_t* m2l_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes*params->num_nodes);
//size_t* num_m2l_interactions = (size_t*)malloc(sizeof(size_t)*params->num_nodes);
//size_t* d_p2p_interactions;
//size_t* d_num_p2p_interactions;
//size_t* d_m2l_interactions;
//size_t* d_num_m2l_interactions;
//CUDACHK(cudaMalloc((void**)&d_p2p_interactions, sizeof(size_t)*nn*nn));
//CUDACHK(cudaMalloc((void**)&d_num_p2p_interactions, sizeof(size_t)*nn));
//CUDACHK(cudaMalloc((void**)&d_m2l_interactions, sizeof(size_t)*nn*nn));
//CUDACHK(cudaMalloc((void**)&d_num_m2l_interactions, sizeof(size_t)*nn));
size_t nn = params->num_nodes;
std::vector<std::vector<int>> vec_p2p_interactions(nn, std::vector<int>());
std::vector<std::vector<int>> vec_m2l_interactions(nn, std::vector<int>());
t_timer dtt_timer;
start(&dtt_timer);
dual_tree_traversal_core(params, get_node(params, params->root), get_node(params, params->root), vec_p2p_interactions, vec_m2l_interactions);
stop(&dtt_timer);
t_timer transfer_timer;
start(&transfer_timer);
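// Flatten the per-node interaction vectors into contiguous arrays: *_offsets[i] gives
// the start of node i's list, *_sizes[i] its length, and *_interactions holds the
// concatenated source-node indices (a CSR-style layout the kernels index directly).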
int* h_p2p_sizes = (int*)malloc(sizeof(int)*nn);
int* h_m2l_sizes = (int*)malloc(sizeof(int)*nn);
int* h_p2p_offsets = (int*)malloc(sizeof(int)*nn);
int* h_m2l_offsets = (int*)malloc(sizeof(int)*nn);
size_t tot_p2p = 0, tot_m2l = 0;
for (int i = 0; i < params->num_nodes; ++i)
{
int num_p2p = vec_p2p_interactions[i].size();
int num_m2l = vec_m2l_interactions[i].size();
h_p2p_offsets[i] = tot_p2p;
h_m2l_offsets[i] = tot_m2l;
tot_p2p += num_p2p;
tot_m2l += num_m2l;
}
int* h_p2p_interactions = (int*)malloc(sizeof(int)*tot_p2p);
int* h_m2l_interactions = (int*)malloc(sizeof(int)*tot_m2l);
for (int i = 0; i < params->num_nodes; ++i)
{
int num_p2p = vec_p2p_interactions[i].size();
int num_m2l = vec_m2l_interactions[i].size();
int p2p_offset = h_p2p_offsets[i];
int m2l_offset = h_m2l_offsets[i];
h_p2p_sizes[i] = num_p2p;
h_m2l_sizes[i] = num_m2l;
for (int j = 0; j < num_p2p; ++j)
h_p2p_interactions[p2p_offset+j] = vec_p2p_interactions[i][j];
for (int j = 0; j < num_m2l; ++j)
h_m2l_interactions[m2l_offset+j] = vec_m2l_interactions[i][j];
}
int* d_p2p_sizes;
int* d_m2l_sizes;
int* d_p2p_offsets;
int* d_m2l_offsets;
int* d_p2p_interactions;
int* d_m2l_interactions;
CUDACHK(cudaMalloc((void**)&d_p2p_sizes, sizeof(int)*nn));
CUDACHK(cudaMalloc((void**)&d_m2l_sizes, sizeof(int)*nn));
CUDACHK(cudaMalloc((void**)&d_p2p_offsets, sizeof(int)*nn));
CUDACHK(cudaMalloc((void**)&d_m2l_offsets, sizeof(int)*nn));
CUDACHK(cudaMalloc((void**)&d_p2p_interactions, sizeof(int)*tot_p2p));
CUDACHK(cudaMalloc((void**)&d_m2l_interactions, sizeof(int)*tot_m2l));
CUDACHK(cudaMemcpy(d_p2p_sizes, h_p2p_sizes, sizeof(int)*nn, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_m2l_sizes, h_m2l_sizes, sizeof(int)*nn, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_p2p_offsets, h_p2p_offsets, sizeof(int)*nn, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_m2l_offsets, h_m2l_offsets, sizeof(int)*nn, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_p2p_interactions, h_p2p_interactions, sizeof(int)*tot_p2p, cudaMemcpyHostToDevice));
CUDACHK(cudaMemcpy(d_m2l_interactions, h_m2l_interactions, sizeof(int)*tot_m2l, cudaMemcpyHostToDevice));
stop(&transfer_timer);
//direct_gpu<<<num_blocks, block_sz>>>(d_x, d_y, d_z, d_w, d_ax, d_ay, d_az, d_p, np);
//CUDACHK(cudaMemcpy(d_p2p_interactions, p2p_interactions, sizeof(size_t)*nn*nn, cudaMemcpyHostToDevice));
//CUDACHK(cudaMemcpy(d_num_p2p_interactions, num_p2p_interactions, sizeof(size_t)*nn, cudaMemcpyHostToDevice));
//CUDACHK(cudaMemcpy(d_m2l_interactions, m2l_interactions, sizeof(size_t)*nn*nn, cudaMemcpyHostToDevice));
//CUDACHK(cudaMemcpy(d_num_m2l_interactions, num_m2l_interactions, sizeof(size_t)*nn, cudaMemcpyHostToDevice));
const int block_sz = 256;
const int num_blocks = nn/4 + ((nn % 4) ? 1 : 0);
t_timer timer;
if(params->dodebug)
start(&timer);
gpu_p2p_interactions<<<num_blocks, block_sz>>>(d_nodes, d_x, d_y, d_z, d_w, d_ax, d_ay, d_az, d_p, d_p2p_interactions, d_p2p_sizes, d_p2p_offsets, nn);
gpu_m2l_interactions<<<num_blocks, block_sz>>>(d_nodes, d_m_real, d_m_imag, d_l_real, d_l_imag, d_m2l_interactions, d_m2l_sizes, d_m2l_offsets, nn);
CUDACHK(cudaPeekAtLastError());
CUDACHK(cudaDeviceSynchronize());
if(params->dodebug)
{
stop(&timer);
printf("tot p2p = %zu, tot_m2l = %zu\n", tot_p2p, tot_m2l);
printf("total memory to allocate to GPU = %zu MB\n", (sizeof(int)*(size_t)(4*nn + tot_p2p + tot_m2l))/1024/1024);
printf("----------\n");
printf("GPU elapsed time = %f\n", timer.elapsed);
printf("DTT elapsed time = %f\n", dtt_timer.elapsed);
printf("MEM elapsed time = %f\n", transfer_timer.elapsed);
printf("Total elapsed time = %f\n", timer.elapsed + dtt_timer.elapsed + transfer_timer.elapsed);
printf("----------\n");
}
CUDACHK(cudaMemcpy(&params->ax[0], d_ax, sizeof(TYPE)*np, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(&params->ay[0], d_ay, sizeof(TYPE)*np, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(&params->az[0], d_az, sizeof(TYPE)*np, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(&params->p[0] , d_p, sizeof(TYPE)*np, cudaMemcpyDeviceToHost));
TYPE* l_real = (TYPE*)malloc(sizeof(TYPE)*nm);
TYPE* l_imag = (TYPE*)malloc(sizeof(TYPE)*nm);
CUDACHK(cudaMemcpy(l_real, d_l_real, sizeof(TYPE)*nm, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(l_imag, d_l_imag, sizeof(TYPE)*nm, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(&params->L_array_real[0], d_l_real, sizeof(TYPE)*nm, cudaMemcpyDeviceToHost));
CUDACHK(cudaMemcpy(&params->L_array_imag[0], d_l_imag, sizeof(TYPE)*nm, cudaMemcpyDeviceToHost));
}
|
e5e3aeb1890f327bfb0e5e19b56e76abe5d28596.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// kernels.cu
// diffusion3d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "acoustics3d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
__constant__ REAL d_kz;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(hipError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != hipSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError()));
exit(-1);
}
#endif
return;
}
/********************/
/* Laplace Operator */
/********************/
__global__ void Compute_Laplace3d_Async(
const REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int px, // allocation pitch
const unsigned int Nx,
const unsigned int Ny,
const unsigned int _Nz,
const unsigned int kstart,
const unsigned int kstop,
const unsigned int loop_z)
{
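// Fourth-order central-difference Laplacian: each thread owns an (i, j) column and
// marches loop_z cells in z, keeping the five z-stencil values (below2..above2) in
// registers and shifting them each plane so every value is loaded from global memory
// only once. The (-1, 16, -30, 16, -1) weights are the standard fourth-order second
// derivative; the 1/(12*h^2) scaling is presumably folded into d_kx, d_ky and d_kz.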
register REAL above2;
register REAL above;
register REAL center;
register REAL below;
register REAL below2;
unsigned int z, XY, Nx2, XY2;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * loop_z;
k = MAX(kstart,k);
XY=px*Ny; Nx2=px+px; XY2=XY+XY;
int o=i+px*j+XY*k;
if (i>2 && i<Nx-3 && j>2 && j<Ny-3)
{
below2=u[o-XY2]; below=u[o-XY]; center=u[o]; above=u[o+XY]; above2=u[o+XY2];
Lu[o] = d_kx * (- u[o-2] + 16*u[o-1] - 30*center + 16*u[o+1] - u[o+2]) +
d_ky * (-u[o-Nx2]+ 16*u[o-px]- 30*center + 16*u[o+px]- u[o+Nx2]) +
d_kz * (- below2 + 16*below - 30*center + 16* above - above2 );
for(z = 1; z < loop_z; z++)
{
k += 1;
if (k < MIN(kstop,_Nz+1))
{
o=o+XY; below2=below; below=center; center=above; above=above2; above2=u[o+XY2];
Lu[o] = d_kx * (- u[o-2] + 16*u[o-1] - 30*center + 16*u[o+1] - u[o+2]) +
d_ky * (-u[o-Nx2]+ 16*u[o-px]- 30*center + 16*u[o+px]- u[o+Nx2]) +
d_kz * (- below2 + 16* below - 30*center + 16* above - above2 );
}
}
}
// else : do nothing!
}
/***********************/
/* Runge Kutta Methods */ // <==== this is perfectly parallel!
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int pitch,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz,
const REAL dt)
{
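// Third-order strong-stability-preserving (Shu-Osher) Runge-Kutta stages:
//   step 1: u1      = u^n + dt*L(u^n)
//   step 2: u2      = 3/4*u^n + 1/4*(u1 + dt*L(u1))
//   step 3: u^(n+1) = 1/3*u^n + 2/3*(u2 + dt*L(u2))
// Only interior cells are updated.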
// Compute Runge-Kutta step, local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
// compute single index
int o=i+pitch*j+pitch*ny*k;
// update only internal cells
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else : do nothing!
}
__global__ void Compute_RK_Async(
REAL * __restrict__ q,
const REAL * __restrict__ qo,
const REAL * __restrict__ Lq,
const unsigned int step,
const unsigned int pitch,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int _Nz,
const unsigned int kstart,
const unsigned int kstop,
const unsigned int loop_z,
const REAL dt)
{
int z, XY = pitch*Ny;
// local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockIdx.z * loop_z;
k = MAX(kstart,k);
// Single index
int o=i+pitch*j+XY*k;
// Compute Runge-Kutta step only on internal cells
if (i>1 && i<Nx-2 && j>1 && j<Ny-2)
{
for(z = 0; z < loop_z; z++)
{
if (k < MIN(kstop,_Nz-2))
{
switch (step) {
case 1: // step 1
q[o] = qo[o]+dt*(Lq[o]); break;
case 2: // step 2
q[o] = 0.75*qo[o]+0.25*(q[o]+dt*(Lq[o])); break;
case 3: // step 3
q[o] = (qo[o]+2*(q[o]+dt*(Lq[o])))/3; break;
}
o += XY;
}
k += 1;
}
}
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(REAL kx, REAL ky, REAL kz)
{
checkCuda(hipMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, hipMemcpyHostToDevice));
}
extern "C" void Call_Lu3d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL *q, REAL *Lq)
{
hipLaunchKernelGGL(( Compute_Laplace3d_Async), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, q,Lq,pitch,nx,ny,nz,3,nz-2,k_loop);
}
extern "C" void Call_RK3d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int step, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dt, REAL *q, REAL *qo, REAL *Lq)
{
hipLaunchKernelGGL(( Compute_RK), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, q,qo,Lq,step,pitch,nx,ny,nz,dt);
// Compute_RK_Async<<<numBlocks,threadsPerBlock,0,aStream>>>(q,qo,Lq,step,pitch,nx,ny,nz,3,nz-2,k_loop,dt);
}
|
e5e3aeb1890f327bfb0e5e19b56e76abe5d28596.cu
|
//
// kernels.cu
// diffusion3d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright © 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "acoustics3d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
__constant__ REAL d_kz;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != cudaSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError()));
exit(-1);
}
#endif
return;
}
/********************/
/* Laplace Operator */
/********************/
__global__ void Compute_Laplace3d_Async(
const REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int px, // allocation pitch
const unsigned int Nx,
const unsigned int Ny,
const unsigned int _Nz,
const unsigned int kstart,
const unsigned int kstop,
const unsigned int loop_z)
{
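// Fourth-order central-difference Laplacian: each thread owns an (i, j) column and
// marches loop_z cells in z, keeping the five z-stencil values (below2..above2) in
// registers and shifting them each plane so every value is loaded from global memory
// only once. The (-1, 16, -30, 16, -1) weights are the standard fourth-order second
// derivative; the 1/(12*h^2) scaling is presumably folded into d_kx, d_ky and d_kz.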
register REAL above2;
register REAL above;
register REAL center;
register REAL below;
register REAL below2;
unsigned int z, XY, Nx2, XY2;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * loop_z;
k = MAX(kstart,k);
XY=px*Ny; Nx2=px+px; XY2=XY+XY;
int o=i+px*j+XY*k;
if (i>2 && i<Nx-3 && j>2 && j<Ny-3)
{
below2=u[o-XY2]; below=u[o-XY]; center=u[o]; above=u[o+XY]; above2=u[o+XY2];
Lu[o] = d_kx * (- u[o-2] + 16*u[o-1] - 30*center + 16*u[o+1] - u[o+2]) +
d_ky * (-u[o-Nx2]+ 16*u[o-px]- 30*center + 16*u[o+px]- u[o+Nx2]) +
d_kz * (- below2 + 16*below - 30*center + 16* above - above2 );
for(z = 1; z < loop_z; z++)
{
k += 1;
if (k < MIN(kstop,_Nz+1))
{
o=o+XY; below2=below; below=center; center=above; above=above2; above2=u[o+XY2];
Lu[o] = d_kx * (- u[o-2] + 16*u[o-1] - 30*center + 16*u[o+1] - u[o+2]) +
d_ky * (-u[o-Nx2]+ 16*u[o-px]- 30*center + 16*u[o+px]- u[o+Nx2]) +
d_kz * (- below2 + 16* below - 30*center + 16* above - above2 );
}
}
}
// else : do nothing!
}
/***********************/
/* Runge Kutta Methods */ // <==== this is perfectly parallel!
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int pitch,
const unsigned int nx,
const unsigned int ny,
const unsigned int nz,
const REAL dt)
{
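// Third-order strong-stability-preserving (Shu-Osher) Runge-Kutta stages:
//   step 1: u1      = u^n + dt*L(u^n)
//   step 2: u2      = 3/4*u^n + 1/4*(u1 + dt*L(u1))
//   step 3: u^(n+1) = 1/3*u^n + 2/3*(u2 + dt*L(u2))
// Only interior cells are updated.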
// Compute Runge-Kutta step, local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
// compute single index
int o=i+pitch*j+pitch*ny*k;
// update only internal cells
if (i>1 && i<nx-2 && j>1 && j<ny-2 && k>1 && k<nz-2)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else : do nothing!
}
__global__ void Compute_RK_Async(
REAL * __restrict__ q,
const REAL * __restrict__ qo,
const REAL * __restrict__ Lq,
const unsigned int step,
const unsigned int pitch,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int _Nz,
const unsigned int kstart,
const unsigned int kstop,
const unsigned int loop_z,
const REAL dt)
{
int z, XY = pitch*Ny;
// local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockIdx.z * loop_z;
k = MAX(kstart,k);
// Single index
int o=i+pitch*j+XY*k;
// Compute Runge-Kutta step only on internal cells
if (i>1 && i<Nx-2 && j>1 && j<Ny-2)
{
for(z = 0; z < loop_z; z++)
{
if (k < MIN(kstop,_Nz-2))
{
switch (step) {
case 1: // step 1
q[o] = qo[o]+dt*(Lq[o]); break;
case 2: // step 2
q[o] = 0.75*qo[o]+0.25*(q[o]+dt*(Lq[o])); break;
case 3: // step 3
q[o] = (qo[o]+2*(q[o]+dt*(Lq[o])))/3; break;
}
o += XY;
}
k += 1;
}
}
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(REAL kx, REAL ky, REAL kz)
{
checkCuda(cudaMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, cudaMemcpyHostToDevice));
}
extern "C" void Call_Lu3d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL *q, REAL *Lq)
{
Compute_Laplace3d_Async<<<numBlocks,threadsPerBlock,0,aStream>>>(q,Lq,pitch,nx,ny,nz,3,nz-2,k_loop);
}
extern "C" void Call_RK3d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int step, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dt, REAL *q, REAL *qo, REAL *Lq)
{
Compute_RK<<<numBlocks,threadsPerBlock,0,aStream>>>(q,qo,Lq,step,pitch,nx,ny,nz,dt);
// Compute_RK_Async<<<numBlocks,threadsPerBlock,0,aStream>>>(q,qo,Lq,step,pitch,nx,ny,nz,3,nz-2,k_loop,dt);
}
|
e3a828d22c70d658154b6756f7cf6c8bb0dda9af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
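// thrust::gather writes dst[i] = src[map[i]] for each index in [map.begin(), map.end()),
// so with src[k] == k the result must equal the map itself: {6, 2, 1, 7, 2}.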
typedef typename Vector::value_type T;
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 6);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 1);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 2);
}
DECLARE_VECTOR_UNITTEST(TestGatherSimple);
template <typename T>
void TestGather(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <typename T>
void TestGatherToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
thrust::discard_iterator<> h_result =
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> d_result =
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator);
template <class Vector>
void TestGatherIfSimple(void)
{
typedef typename Vector::value_type T;
Vector flg(5); // predicate array
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0;
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 0);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 0);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 0);
}
DECLARE_VECTOR_UNITTEST(TestGatherIfSimple);
template <typename T>
struct is_even_gather_if
{
__host__ __device__
bool operator()(const T i) const
{
return (i % 2) == 0;
}
};
template <typename T>
void TestGatherIf(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename T>
void TestGatherIfToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
thrust::discard_iterator<> h_result =
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> d_result =
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator);
template <typename Vector>
void TestGatherCountingIterator(void)
{
#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0)
KNOWN_FAILURE;
#endif
typedef typename Vector::value_type T;
Vector source(10);
thrust::sequence(source.begin(), source.end(), 0);
Vector map(10);
thrust::sequence(map.begin(), map.end(), 0);
Vector output(10);
// source has any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(map.begin(),
map.end(),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
// map has any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)source.size()),
source.begin(),
output.begin());
ASSERT_EQUAL(output, map);
// source and map have any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)output.size()),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
}
DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
e3a828d22c70d658154b6756f7cf6c8bb0dda9af.cu
|
#include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
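// thrust::gather writes dst[i] = src[map[i]] for each index in [map.begin(), map.end()),
// so with src[k] == k the result must equal the map itself: {6, 2, 1, 7, 2}.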
typedef typename Vector::value_type T;
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 6);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 1);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 2);
}
DECLARE_VECTOR_UNITTEST(TestGatherSimple);
template <typename T>
void TestGather(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <typename T>
void TestGatherToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
thrust::discard_iterator<> h_result =
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> d_result =
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator);
template <class Vector>
void TestGatherIfSimple(void)
{
typedef typename Vector::value_type T;
Vector flg(5); // predicate array
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0;
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 0);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 0);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 0);
}
DECLARE_VECTOR_UNITTEST(TestGatherIfSimple);
template <typename T>
struct is_even_gather_if
{
__host__ __device__
bool operator()(const T i) const
{
return (i % 2) == 0;
}
};
template <typename T>
void TestGatherIf(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename T>
void TestGatherIfToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
thrust::discard_iterator<> h_result =
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> d_result =
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator);
template <typename Vector>
void TestGatherCountingIterator(void)
{
#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0)
KNOWN_FAILURE;
#endif
typedef typename Vector::value_type T;
Vector source(10);
thrust::sequence(source.begin(), source.end(), 0);
Vector map(10);
thrust::sequence(map.begin(), map.end(), 0);
Vector output(10);
// source has any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(map.begin(),
map.end(),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
// map has any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)source.size()),
source.begin(),
output.begin());
ASSERT_EQUAL(output, map);
// source and map have any_space_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)output.size()),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
}
DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
0cc35f0294dcff7398adf4b9727461cd10fdca13.hip
|
// !!! This is a file automatically generated by hipify!!!
/** \file bc_open.cu: contains a CUDA kernel for open inner and outer boundary condition.
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_OPENBC
#define BLOCK_X 16//64
#define BLOCK_Y 1
__global__ void kernel_openbc_in (double *vrad,
double *vtheta,
double *rho,
double *energy,
double *dust_size,
const int ns,
const double SigmaMed,
const double VthetaMed0,
const double VthetaMed1,
bool dust) {
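// Open (outflow-only) inner boundary, one thread per azimuthal cell jg.
// Gas: the ghost ring copies surface density (and energy, if allocated) from the
// first active ring; the radial velocity at the inner interface is zeroed whenever
// the flow one interface further out points into the domain (vrad > 0) or the ring
// is under-dense, otherwise it is copied, so material can leave but not enter.
// Dust: the same test is applied, but the innermost interface velocity is the one zeroed.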
const int jg = blockDim.x * blockIdx.x + threadIdx.x;
// gas: do not allow inflow
if (!dust) {
rho[jg] = rho[jg+ns];
if (energy != NULL)
energy[jg] = energy[jg+ns];
// do not allow inflow for gas
if (vrad[jg+ns+ns] > 0.0 || rho[jg+ns] < SigmaMed) {
vrad[jg+ns] = 0.0;
}
else {
vrad[jg+ns] = vrad[jg+ns+ns];
}
}
// dust: inflow is possible for dust
else {
/*
vrad[jg] = 0.0;
vrad[jg+ns] = 0.0;
*/
//vtheta[jg] = vtheta[jg+ns];
rho[jg] = rho[jg+ns];
if (vrad[jg+ns+ns] > 0.0 || rho[jg+ns] < SigmaMed) {
vrad[jg] = 0.0;
//vrad[jg+ns] = 0.0;
}
else
vrad[jg+ns] = vrad[jg+ns+ns];
}
// size of grown dust
if (dust_size != NULL) {
dust_size[jg] = dust_size[jg+ns];
}
}
__global__ void kernel_openbc_out (double *vrad,
double *vtheta,
double *rho,
double *energy,
double *dust_size,
const int nr,
const int ns,
const double SigmaMed,
bool dust) {
const int jg = blockDim.x * blockIdx.x + threadIdx.x;
// gas: do not allow inflow
if (!dust) {
rho[jg+(nr-1)*ns] = rho[jg+(nr-2)*ns];
if (energy != NULL)
energy[jg+(nr-1)*ns] = energy[jg+(nr-2)*ns];
if (vrad[jg+(nr-2)*ns] < 0.0 || rho[jg+(nr-2)*ns] < SigmaMed)
vrad[jg+(nr-1)*ns] = 0.0;
else
vrad[jg+(nr-1)*ns] = vrad[jg+(nr-2)*ns];
}
// dust: inflow is possible for dust
else {
rho[jg+(nr-1)*ns] = rho[jg+(nr-2)*ns];
//rho[jg+(nr-1)*ns] = SigmaMed;//rho[jg+(nr-2)*ns];
//rho[jg+(nr-2)*ns] = SigmaMed;
if (vrad[jg+(nr-2)*ns] < 0.0) {// || rho[jg+(nr-2)*ns] < SigmaMed) {
vrad[jg+(nr-1)*ns] = 0.0;
vrad[jg+(nr-2)*ns] = 0.0;
}
else
vrad[jg+(nr-1)*ns] = vrad[jg+(nr-2)*ns];
}
// size of grown dust
if (dust_size != NULL) {
dust_size[jg+(nr-1)*ns] = dust_size[jg+(nr-2)*ns];
}
}
extern "C" void OpenBoundary_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, int where) {
int nr = Vrad->Nrad;
int ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid;
grid.x = (ns+block.x-1)/block.x;
double *energy_gpu_field = NULL;
if (Adiabatic)
energy_gpu_field = Energy->gpu_field;
if (where == INNER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
hipLaunchKernelGGL(( kernel_openbc_in) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
energy_gpu_field,
NULL,
ns,
SigmaMed[0],
GasVelThetaMed[0],
GasVelThetaMed[1],
false); // dust=false
hipDeviceSynchronize();
getLastCudaError ("kernel_openbc_in failed");
}
if (where == OUTER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
hipLaunchKernelGGL(( kernel_openbc_out) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
energy_gpu_field,
NULL,
nr,
ns,
SigmaMed[nr-2],
false); // dust=false
hipDeviceSynchronize();
getLastCudaError ("kernel_openbc_out failed");
}
}
extern "C" void OpenBoundaryDust_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, int DustBin, int where) {
int nr = Vrad->Nrad;
int ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid;
grid.x = (ns+block.x-1)/block.x;
double *dust_size_gpu_field = NULL;
if (DustGrowth) {
dust_size_gpu_field = dust_size->gpu_field;
}
if (where == INNER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
hipLaunchKernelGGL(( kernel_openbc_in) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
NULL,
dust_size_gpu_field,
ns,
SigmaMed[0]*DustMassBin[DustBin],
0,
0,
true);
hipDeviceSynchronize();
getLastCudaError ("kernel_openbc_in failed");
}
if (where == OUTER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
hipLaunchKernelGGL(( kernel_openbc_out) , dim3(grid), dim3(block) , 0, 0, Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
NULL,
dust_size_gpu_field,
nr,
ns,
SigmaMed[nr-2]*DustMassBin[DustBin],
true);
hipDeviceSynchronize();
getLastCudaError ("kernel_openbc_out failed");
}
}
|
0cc35f0294dcff7398adf4b9727461cd10fdca13.cu
|
/** \file bc_open.cu: contains a CUDA kernel for open inner and outer boundary condition.
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_OPENBC
#define BLOCK_X 16//64
#define BLOCK_Y 1
__global__ void kernel_openbc_in (double *vrad,
double *vtheta,
double *rho,
double *energy,
double *dust_size,
const int ns,
const double SigmaMed,
const double VthetaMed0,
const double VthetaMed1,
bool dust) {
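// Open (outflow-only) inner boundary, one thread per azimuthal cell jg.
// Gas: the ghost ring copies surface density (and energy, if allocated) from the
// first active ring; the radial velocity at the inner interface is zeroed whenever
// the flow one interface further out points into the domain (vrad > 0) or the ring
// is under-dense, otherwise it is copied, so material can leave but not enter.
// Dust: the same test is applied, but the innermost interface velocity is the one zeroed.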
const int jg = blockDim.x * blockIdx.x + threadIdx.x;
// gas: do not allow inflow
if (!dust) {
rho[jg] = rho[jg+ns];
if (energy != NULL)
energy[jg] = energy[jg+ns];
// do not allow inflow for gas
if (vrad[jg+ns+ns] > 0.0 || rho[jg+ns] < SigmaMed) {
vrad[jg+ns] = 0.0;
}
else {
vrad[jg+ns] = vrad[jg+ns+ns];
}
}
// dust: inflow is possible for dust
else {
/*
vrad[jg] = 0.0;
vrad[jg+ns] = 0.0;
*/
//vtheta[jg] = vtheta[jg+ns];
rho[jg] = rho[jg+ns];
if (vrad[jg+ns+ns] > 0.0 || rho[jg+ns] < SigmaMed) {
vrad[jg] = 0.0;
//vrad[jg+ns] = 0.0;
}
else
vrad[jg+ns] = vrad[jg+ns+ns];
}
// size of grown dust
if (dust_size != NULL) {
dust_size[jg] = dust_size[jg+ns];
}
}
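// Outer open boundary kernel: ghost ring nr-1 is filled from ring nr-2 and the radial velocity is zeroed when it points into the domain (for gas, also when the local density drops below SigmaMed).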
__global__ void kernel_openbc_out (double *vrad,
double *vtheta,
double *rho,
double *energy,
double *dust_size,
const int nr,
const int ns,
const double SigmaMed,
bool dust) {
const int jg = blockDim.x * blockIdx.x + threadIdx.x;
// gas: do not allow inflow
if (!dust) {
rho[jg+(nr-1)*ns] = rho[jg+(nr-2)*ns];
if (energy != NULL)
energy[jg+(nr-1)*ns] = energy[jg+(nr-2)*ns];
if (vrad[jg+(nr-2)*ns] < 0.0 || rho[jg+(nr-2)*ns] < SigmaMed)
vrad[jg+(nr-1)*ns] = 0.0;
else
vrad[jg+(nr-1)*ns] = vrad[jg+(nr-2)*ns];
}
// dust: inflow is possible for dust
else {
rho[jg+(nr-1)*ns] = rho[jg+(nr-2)*ns];
//rho[jg+(nr-1)*ns] = SigmaMed;//rho[jg+(nr-2)*ns];
//rho[jg+(nr-2)*ns] = SigmaMed;
if (vrad[jg+(nr-2)*ns] < 0.0) {// || rho[jg+(nr-2)*ns] < SigmaMed) {
vrad[jg+(nr-1)*ns] = 0.0;
vrad[jg+(nr-2)*ns] = 0.0;
}
else
vrad[jg+(nr-1)*ns] = vrad[jg+(nr-2)*ns];
}
// size of grown dust
if (dust_size != NULL) {
dust_size[jg+(nr-1)*ns] = dust_size[jg+(nr-2)*ns];
}
}
extern "C" void OpenBoundary_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, PolarGrid *Energy, int where) {
int nr = Vrad->Nrad;
int ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid;
grid.x = (ns+block.x-1)/block.x;
double *energy_gpu_field = NULL;
if (Adiabatic)
energy_gpu_field = Energy->gpu_field;
if (where == INNER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
kernel_openbc_in <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
energy_gpu_field,
NULL,
ns,
SigmaMed[0],
GasVelThetaMed[0],
GasVelThetaMed[1],
false); // dust=false
cudaDeviceSynchronize();
getLastCudaError ("kernel_openbc_in failed");
}
if (where == OUTER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
kernel_openbc_out <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
energy_gpu_field,
NULL,
nr,
ns,
SigmaMed[nr-2],
false); // dust=false
cudaDeviceSynchronize();
getLastCudaError ("kernel_openbc_out failed");
}
}
extern "C" void OpenBoundaryDust_gpu (PolarGrid *Vrad, PolarGrid *Vtheta, PolarGrid *Rho, int DustBin, int where) {
int nr = Vrad->Nrad;
int ns = Vrad->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid;
grid.x = (ns+block.x-1)/block.x;
double *dust_size_gpu_field = NULL;
if (DustGrowth) {
dust_size_gpu_field = dust_size->gpu_field;
}
if (where == INNER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
kernel_openbc_in <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
NULL,
dust_size_gpu_field,
ns,
SigmaMed[0]*DustMassBin[DustBin],
0,
0,
true);
cudaDeviceSynchronize();
getLastCudaError ("kernel_openbc_in failed");
}
if (where == OUTER) {
int nb_block_y = (1+BLOCK_Y-1)/BLOCK_Y;
grid.y = nb_block_y;
kernel_openbc_out <<< grid, block >>> (Vrad->gpu_field,
Vtheta->gpu_field,
Rho->gpu_field,
NULL,
dust_size_gpu_field,
nr,
ns,
SigmaMed[nr-2]*DustMassBin[DustBin],
true);
cudaDeviceSynchronize();
getLastCudaError ("kernel_openbc_out failed");
}
}
|
8881bb8d0a20c204b55ee9fc485811d559e46c3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "process_cross_edges.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
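// Benchmark sweep tables: blocks_ holds the thread-block shapes and matrices_ the square problem sizes timed in main().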
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *parent = NULL;
hipMalloc(&parent, XSIZE*YSIZE);
Edge *edge_list = NULL;
hipMalloc(&edge_list, XSIZE*YSIZE);
int e = 1;
bool *flag = NULL;
hipMalloc(&flag, XSIZE*YSIZE);
bool *cross_edges = NULL;
hipMalloc(&cross_edges, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(process_cross_edges, dim3(gridBlock), dim3(threadBlock), 0, 0, parent, edge_list, e, flag, cross_edges);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(process_cross_edges, dim3(gridBlock), dim3(threadBlock), 0, 0, parent, edge_list, e, flag, cross_edges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(process_cross_edges, dim3(gridBlock), dim3(threadBlock), 0, 0, parent, edge_list, e, flag, cross_edges);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
8881bb8d0a20c204b55ee9fc485811d559e46c3e.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "process_cross_edges.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *parent = NULL;
cudaMalloc(&parent, XSIZE*YSIZE);
Edge *edge_list = NULL;
cudaMalloc(&edge_list, XSIZE*YSIZE);
int e = 1;
bool *flag = NULL;
cudaMalloc(&flag, XSIZE*YSIZE);
bool *cross_edges = NULL;
cudaMalloc(&cross_edges, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
process_cross_edges<<<gridBlock,threadBlock>>>(parent,edge_list,e,flag,cross_edges);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
process_cross_edges<<<gridBlock,threadBlock>>>(parent,edge_list,e,flag,cross_edges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
process_cross_edges<<<gridBlock,threadBlock>>>(parent,edge_list,e,flag,cross_edges);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
63c4b19bdfe2f290da18125e8edf11e34ffef00b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 4 Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
// Due Tuesday, January 15, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void total(float * input, float * output, int len) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE;
if (start + t < len)
partialSum[t] = input[start + t];
else
partialSum[t] = 0;
if (start + BLOCK_SIZE + t < len)
partialSum[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t];
else
partialSum[BLOCK_SIZE + t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t == 0)
output[blockIdx.x] = partialSum[0];
}
int main(int argc, char ** argv) {
int ii;
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
hostOutput = (float*) malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc(&deviceInput, sizeof(float) * numInputElements);
hipMalloc(&deviceOutput, sizeof(float) * numOutputElements);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid(numOutputElements, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( total), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
|
63c4b19bdfe2f290da18125e8edf11e34ffef00b.cu
|
// MP 4 Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
// Due Tuesday, January 15, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void total(float * input, float * output, int len) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE;
if (start + t < len)
partialSum[t] = input[start + t];
else
partialSum[t] = 0;
if (start + BLOCK_SIZE + t < len)
partialSum[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t];
else
partialSum[BLOCK_SIZE + t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t == 0)
output[blockIdx.x] = partialSum[0];
}
int main(int argc, char ** argv) {
int ii;
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
hostOutput = (float*) malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc(&deviceInput, sizeof(float) * numInputElements);
cudaMalloc(&deviceOutput, sizeof(float) * numOutputElements);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid(numOutputElements, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
total<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
|
3bf1aa2a6af310c2733279e087042ea956eeeb55.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void res_gpu( const double *A, const float *u, float *du, const float *beta) {
*du += (float)((*beta)*(*A)*(*u));
}
// CUDA kernel function
__global__ void op_cuda_res(
const float *__restrict ind_arg0,
float *__restrict ind_arg1,
const int *__restrict opDat1Map,
const double *__restrict arg0,
const float *arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg2_l[3];
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ float *ind_arg1_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg1_size = ind_arg_sizes[0+blockId*1];
ind_arg1_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
//set shared memory pointers
int nbytes = 0;
ind_arg1_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<ind_arg1_size*3; n+=blockDim.x ){
ind_arg1_s[n] = ZERO_float;
}
__syncthreads();
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<3; d++ ){
arg2_l[d] = ZERO_float;
}
map1idx = opDat1Map[n + offset_b + set_size * 1];
map2idx = opDat1Map[n + offset_b + set_size * 0];
//user-supplied kernel call
res_gpu(arg0+(n+offset_b)*3,
ind_arg0+map1idx*2,
arg2_l,
arg3);
col2 = colors[n+offset_b];
}
//store local variables
int arg2_map;
if (col2>=0) {
arg2_map = arg_map[0*set_size+n+offset_b];
}
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg2_l[0] += ind_arg1_s[0+arg2_map*3];
arg2_l[1] += ind_arg1_s[1+arg2_map*3];
arg2_l[2] += ind_arg1_s[2+arg2_map*3];
ind_arg1_s[0+arg2_map*3] = arg2_l[0];
ind_arg1_s[1+arg2_map*3] = arg2_l[1];
ind_arg1_s[2+arg2_map*3] = arg2_l[2];
}
__syncthreads();
}
}
for ( int n=threadIdx.x; n<ind_arg1_size*3; n+=blockDim.x ){
ind_arg1[n%3+ind_arg1_map[n/3]*3] += ind_arg1_s[n];
}
}
//host stub function
void op_par_loop_res(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3){
float*arg3h = (float *)arg3.data;
int nargs = 4;
op_arg args[4];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
int ninds = 2;
int inds[4] = {-1,0,1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: res\n");
}
//get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_res), dim3(nblocks),dim3(nthread),nshared, 0,
(float *)arg1.data_d,
(float *)arg2.data_d,
arg1.map_data_d,
(double*)arg0.data_d,
(float*)arg3.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
3bf1aa2a6af310c2733279e087042ea956eeeb55.cu
|
//
// auto-generated by op2.py
//
//user function
__device__ void res_gpu( const double *A, const float *u, float *du, const float *beta) {
*du += (float)((*beta)*(*A)*(*u));
}
// CUDA kernel function
__global__ void op_cuda_res(
const float *__restrict ind_arg0,
float *__restrict ind_arg1,
const int *__restrict opDat1Map,
const double *__restrict arg0,
const float *arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg2_l[3];
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ float *ind_arg1_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg1_size = ind_arg_sizes[0+blockId*1];
ind_arg1_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
//set shared memory pointers
int nbytes = 0;
ind_arg1_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<ind_arg1_size*3; n+=blockDim.x ){
ind_arg1_s[n] = ZERO_float;
}
__syncthreads();
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<3; d++ ){
arg2_l[d] = ZERO_float;
}
map1idx = opDat1Map[n + offset_b + set_size * 1];
map2idx = opDat1Map[n + offset_b + set_size * 0];
//user-supplied kernel call
res_gpu(arg0+(n+offset_b)*3,
ind_arg0+map1idx*2,
arg2_l,
arg3);
col2 = colors[n+offset_b];
}
//store local variables
int arg2_map;
if (col2>=0) {
arg2_map = arg_map[0*set_size+n+offset_b];
}
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg2_l[0] += ind_arg1_s[0+arg2_map*3];
arg2_l[1] += ind_arg1_s[1+arg2_map*3];
arg2_l[2] += ind_arg1_s[2+arg2_map*3];
ind_arg1_s[0+arg2_map*3] = arg2_l[0];
ind_arg1_s[1+arg2_map*3] = arg2_l[1];
ind_arg1_s[2+arg2_map*3] = arg2_l[2];
}
__syncthreads();
}
}
for ( int n=threadIdx.x; n<ind_arg1_size*3; n+=blockDim.x ){
ind_arg1[n%3+ind_arg1_map[n/3]*3] += ind_arg1_s[n];
}
}
//host stub function
void op_par_loop_res(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3){
float*arg3h = (float *)arg3.data;
int nargs = 4;
op_arg args[4];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
int ninds = 2;
int inds[4] = {-1,0,1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: res\n");
}
//get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
op_cuda_res<<<nblocks,nthread,nshared>>>(
(float *)arg1.data_d,
(float *)arg2.data_d,
arg1.map_data_d,
(double*)arg0.data_d,
(float*)arg3.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
cf89bf377134e8bfc762b6728a7cc2742119c859.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <cstdlib>
#include <string>
#include <time.h>
#include <csignal>
using namespace std;
#include "matrix.h"
#include "crbmGpu.h"
using namespace YAMATH;
typedef MatrixGpu Mat;
CRBM::CRBMLayerGpu *abc = NULL;
void signalHandler(int signum)
{
if(abc != NULL)
{
cout << endl;
cout << "!!! Forcing RBM to interrupt learning ... !!!" << endl;
cout << "!!! repeated CTRL+C will stop program without saving !!!" << endl;
cout << endl;
abc->SignalStop();
//clear handler
signal(SIGINT, SIG_DFL);
}
else
{
exit(signum);
}
}
int main(int argc, char** argv)
{
if(argc != 4 && argc != 5)
{
cout << "Too few params!" << endl;
cout << argv[0] << " setting-file model-file input-vector-file [cudadevice-id]" << endl;
cout << "\tmodel-file can be \"-\" for random-model initialization." << endl;
exit(1);
}
if(argc > 4)
{
int device = atoi(argv[4]);
cout << "Device ID: " << device << endl;
hipSetDevice(device);
}
hipblasStatus_t stat;
hipblasHandle_t handle;
cout << "cublas init ..." << flush;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
cout << " done" << endl;
CRBM::CRBMLayerSetting setting;
setting.loadFromFile(argv[1]);
//register signal SIGINT and signal handler
signal(SIGINT, signalHandler);
Timer timer;
if(string(argv[2]) != "-")
{
cout << "Loading RBM-layer ... " << flush;
abc = new CRBM::CRBMLayerGpu(setting);
abc->Load(string(argv[2]));
//reset loaded setting
abc->ResetSetting(setting);
}
else
{
cout << "Creating RBM-layer ... " << flush;
abc = new CRBM::CRBMLayerGpu(setting);
timer.tac(" ... done in ");
}
MatrixCpu x;
//matrix is transposed!!! => pictures are saved one after another - like in the file => row major ~ col major transposed
//loadMatrix(x, argv[3], true, string(argv[3]) + ".cache");
MatrixLoaderFile loader(argv[3]);
loader.LoadComplete(x, true);
timer.tic();
abc->LearnAll(x, string(argv[3]) + ".rbm", true);
timer.tac("learning duration: ");
if(abc->IsStopRequired())
{
cout << endl;
for(int i = 3; i > 0; --i)
{
cout << "\rsave will be started in " << i << flush;
sleep(1);
}
cout << "\rsave will be started now! " << endl;
}
abc->Save(string(argv[3]) + ".rbm");
return 0;
}
|
cf89bf377134e8bfc762b6728a7cc2742119c859.cu
|
#include <vector>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <cstdlib>
#include <string>
#include <time.h>
#include <csignal>
using namespace std;
#include "matrix.h"
#include "crbmGpu.h"
using namespace YAMATH;
typedef MatrixGpu Mat;
CRBM::CRBMLayerGpu *abc = NULL;
void signalHandler(int signum)
{
if(abc != NULL)
{
cout << endl;
cout << "!!! Forcing RBM to interrupt learning ... !!!" << endl;
cout << "!!! repeated CTRL+C will stop program without saving !!!" << endl;
cout << endl;
abc->SignalStop();
//clear handler
signal(SIGINT, SIG_DFL);
}
else
{
exit(signum);
}
}
int main(int argc, char** argv)
{
if(argc != 4 && argc != 5)
{
cout << "Too few params!" << endl;
cout << argv[0] << " setting-file model-file input-vector-file [cudadevice-id]" << endl;
cout << "\tmodel-file can be \"-\" for random-model initialization." << endl;
exit(1);
}
if(argc > 4)
{
int device = atoi(argv[4]);
cout << "Device ID: " << device << endl;
cudaSetDevice(device);
}
cublasStatus_t stat;
cublasHandle_t handle;
cout << "cublas init ..." << flush;
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
cout << " done" << endl;
CRBM::CRBMLayerSetting setting;
setting.loadFromFile(argv[1]);
//register signal SIGINT and signal handler
signal(SIGINT, signalHandler);
Timer timer;
if(string(argv[2]) != "-")
{
cout << "Loading RBM-layer ... " << flush;
abc = new CRBM::CRBMLayerGpu(setting);
abc->Load(string(argv[2]));
//reset loaded setting
abc->ResetSetting(setting);
}
else
{
cout << "Creating RBM-layer ... " << flush;
abc = new CRBM::CRBMLayerGpu(setting);
timer.tac(" ... done in ");
}
MatrixCpu x;
//matrix is transposed!!! => pictures are saved one after another - like in the file => row major ~ col major transposed
//loadMatrix(x, argv[3], true, string(argv[3]) + ".cache");
MatrixLoaderFile loader(argv[3]);
loader.LoadComplete(x, true);
timer.tic();
abc->LearnAll(x, string(argv[3]) + ".rbm", true);
timer.tac("learning duration: ");
if(abc->IsStopRequired())
{
cout << endl;
for(int i = 3; i > 0; --i)
{
cout << "\rsave will be started in " << i << flush;
sleep(1);
}
cout << "\rsave will be started now! " << endl;
}
abc->Save(string(argv[3]) + ".rbm");
return 0;
}
|
a248d542cd6d297e73388107dac3a4ca28c33eb7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VectorReductHead.cuh"
void InitializeArray(DATA_TYPE *array, unsigned int size);
DATA_TYPE ReductHost(DATA_TYPE *array, unsigned int size);
void reduce_2_cpu(DATA_TYPE *array, unsigned int size)
{
double temp=0.0;
int i;
for ( i = 0; i < size; i++)
{
temp += array[i];
}
printf("GPU result:%f\t", temp);
}
__global__ void kernel(DATA_TYPE *src, DATA_TYPE *dst, unsigned int src_s)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int x = tx + bx * blockDim.x;
__shared__ DATA_TYPE d_s[BLOCKSIZE];
d_s[tx] = x < src_s ? src[x] : 0.0;
__syncthreads();
int stride = blockDim.x;
int j;
for(j = stride; j > 0; j /= 2)
{
if (tx < j && tx + j < blockDim.x)
{
d_s[tx] += d_s[tx + j];
}
__syncthreads();
}
if (tx == 0)
{
dst[bx] = d_s[0];
}
}
__device__ void warpReduce(volatile DATA_TYPE *d_s, unsigned int tx)
{
if (tx < 32) d_s[tx] += d_s[tx + 32];
if (tx < 16) d_s[tx] += d_s[tx + 16];
if (tx < 8) d_s[tx] += d_s[tx + 8];
if (tx < 4) d_s[tx] += d_s[tx + 4];
if (tx < 2) d_s[tx] += d_s[tx + 2];
if (tx < 1) d_s[tx] += d_s[tx + 1];
}
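// kernel_2: block-level tree reduction with the first rounds unrolled; each block writes one partial sum to dst.
// The last 32 elements are combined by warpReduce(), which relies on warp-synchronous execution over volatile shared memory.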
__global__ void kernel_2(const DATA_TYPE *src, DATA_TYPE *dst, unsigned int src_s)
{
int tx = threadIdx.x;
int bx = blockIdx.x + blockIdx.y * gridDim.x;
int x = tx + bx * blockDim.x;
__shared__ DATA_TYPE d_s[BLOCKSIZE];
d_s[tx] = x < src_s ? src[x] : 0.0;
__syncthreads();
if (BLOCKSIZE >= 512)
{
if (tx < 256) d_s[tx] += d_s[tx + 256];
__syncthreads();
}
if (BLOCKSIZE >= 256)
{
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
}
if (BLOCKSIZE >= 128)
{
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
}
if (tx < 32) warpReduce(d_s, tx);
if (tx == 0)
{
dst[bx] = d_s[0];
}
//if (tx == 0 && bx < 153)
// printf("%f\n", dst[bx]);
}
__global__ void kernel_3(DATA_TYPE *d_D, DATA_TYPE *d_B, unsigned int size_d)
{
int tx = threadIdx.x;
//printf("d_D: %d, %f\n", tx, d_D[tx]);
__shared__ DATA_TYPE d_s[BLOCKSIZE_3];
d_s[tx] = 0;
if (tx< size_d)
d_s[tx] = d_D[tx];
__syncthreads();
//printf("d_s: %d: %f\n",tx, d_s[tx]);
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
if (tx < 32) warpReduce(d_s, tx);
if (tx == 0)
{
d_B[0] = d_s[0];
//printf("%f\n", d_s[0]);
//printf("%f\n", d_B[0]);
}
}
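// Three-stage reduction: kernel_2 collapses the N inputs into grid.x block sums (d_C), a second kernel_2 pass
// collapses those into grid_2.x values (d_D), and kernel_3 (a single block) writes the final scalar to d_B[0].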
void kernel_call(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int size_A)
{
DATA_TYPE *d_C;
DATA_TYPE *d_D;
dim3 block(BLOCKSIZE);
dim3 grid((N + block.x - 1) / block.x);
unsigned int size_C = grid.x;
unsigned int SIZEC = size_C * sizeof(DATA_TYPE);
dim3 block_2(BLOCKSIZE);
dim3 grid_2((size_C + block_2.x-1) / block_2.x);
unsigned int size_D = grid_2.x;
unsigned int SIZED = size_D * sizeof(DATA_TYPE);
CHECK(hipMalloc((void**)&d_C, SIZEC));
CHECK(hipMemset(d_C, 0, SIZEC));
CHECK(hipMalloc((void**)&d_D, SIZED));
CHECK(hipMemset(d_D, 0, SIZED));
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
hipEventQuery(start);
//printf("size_A=%d\t ", size_A); 1000 0000
//printf("size_C=%d\t ", size_C); 39063
//printf("size_D=%d\t ", size_D); 153
//kernel<<<grid,block>>>(d_A, d_C, size_A);
kernel_2 << <grid, block >> > (d_A, d_C, N);
CHECK(hipGetLastError());
kernel_2 << <grid_2, block_2 >> > (d_C, d_D, size_C);
CHECK(hipGetLastError());
kernel_3 << <1, BLOCKSIZE_3 >> > (d_D, d_B, size_D);
CHECK(hipGetLastError());
CHECK(hipDeviceSynchronize()); // device-wide sync (the original CUDA comment noted that cudaThreadSynchronize() is deprecated)
//time end
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(hipEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel time=%f ms\n", elapsedTime_cuda);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
//DATA_TYPE *h_C = (DATA_TYPE*)malloc(SIZEC);
//CHECK(hipMemcpy(h_C, d_C, SIZEC, hipMemcpyDefault));
//reduce_2_cpu(h_C, size_C);
hipFree(d_C);
hipFree(d_D);
}
int main()
{
unsigned int SIZEA = N * sizeof(DATA_TYPE);
int SIZEB = 1 * sizeof(DATA_TYPE);
printf("Allocate %e MB on CPU\n", SIZEA / (1024.f*1024.f));
DATA_TYPE *h_A = (DATA_TYPE*)malloc(SIZEA);
DATA_TYPE *h_B = (DATA_TYPE*)malloc(SIZEB);
if (h_A == NULL)
printf("Failed to allocate CPU memory - h_A\n");
memset(h_A, 0, SIZEA);
memset(h_B, 0, SIZEB);
InitializeArray(h_A, N);
//timer
LARGE_INTEGER nFreq;
LARGE_INTEGER nBeginTime;
LARGE_INTEGER nEndTime;
double host_time;
QueryPerformanceFrequency(&nFreq);
QueryPerformanceCounter(&nBeginTime);
DATA_TYPE h_sum =ReductHost(h_A, N);
printf("CPU result:%f\t", h_sum);
QueryPerformanceCounter(&nEndTime);
host_time = (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart;
printf("CPU time: %f ms\n", host_time*1000);
//GPU
DATA_TYPE *d_A;
DATA_TYPE *d_B;
CHECK(hipMalloc((void**)&d_A, SIZEA));
CHECK(hipMalloc((void**)&d_B, SIZEB));
CHECK(hipMemcpy(d_A, h_A, SIZEA, hipMemcpyDefault));
//timer
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
hipEventQuery(start);
kernel_call(d_A, d_B, N);
//time end
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsedTime;
CHECK(hipEventElapsedTime(&elapsedTime, start, stop));
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
CHECK(hipMemcpy(h_B, d_B, SIZEB, hipMemcpyDefault));
printf("GPU result:%f\t", h_B[0]);
printf("GPU time=%f ms\n", elapsedTime);
printf("Speed up=%f \n", (host_time * 1000)/elapsedTime);
free(h_A);
free(h_B);
hipFree(d_A);
hipFree(d_B);
return 0;
}
void InitializeArray(DATA_TYPE *array, unsigned int size)
{
int i;
for (i = 0; i < size; i++)
{
array[i] = 1.0;
}
}
DATA_TYPE ReductHost(DATA_TYPE *array, unsigned int size)
{
unsigned int i;
double result = 0;
for (i = 0; i < size; i++)
result += array[i];
DATA_TYPE result_1 = (DATA_TYPE)result;
return result_1;
}
|
a248d542cd6d297e73388107dac3a4ca28c33eb7.cu
|
#include "VectorReductHead.cuh"
void InitializeArray(DATA_TYPE *array, unsigned int size);
DATA_TYPE ReductHost(DATA_TYPE *array, unsigned int size);
void reduce_2_cpu(DATA_TYPE *array, unsigned int size)
{
double temp=0.0;
int i;
for ( i = 0; i < size; i++)
{
temp += array[i];
}
printf("GPU result:%f\t", temp);
}
__global__ void kernel(DATA_TYPE *src, DATA_TYPE *dst, unsigned int src_s)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int x = tx + bx * blockDim.x;
__shared__ DATA_TYPE d_s[BLOCKSIZE];
d_s[tx] = x < src_s ? src[x] : 0.0;
__syncthreads();
int stride = blockDim.x;
int j;
for(j = stride; j > 0; j /= 2)
{
if (tx < j && tx + j < blockDim.x)
{
d_s[tx] += d_s[tx + j];
}
__syncthreads();
}
if (tx == 0)
{
dst[bx] = d_s[0];
}
}
__device__ void warpReduce(volatile DATA_TYPE *d_s, unsigned int tx)
{
if (tx < 32) d_s[tx] += d_s[tx + 32];
if (tx < 16) d_s[tx] += d_s[tx + 16];
if (tx < 8) d_s[tx] += d_s[tx + 8];
if (tx < 4) d_s[tx] += d_s[tx + 4];
if (tx < 2) d_s[tx] += d_s[tx + 2];
if (tx < 1) d_s[tx] += d_s[tx + 1];
}
__global__ void kernel_2(const DATA_TYPE *src, DATA_TYPE *dst, unsigned int src_s)
{
int tx = threadIdx.x;
int bx = blockIdx.x + blockIdx.y * gridDim.x;
int x = tx + bx * blockDim.x;
__shared__ DATA_TYPE d_s[BLOCKSIZE];
d_s[tx] = x < src_s ? src[x] : 0.0;
__syncthreads();
if (BLOCKSIZE >= 512)
{
if (tx < 256) d_s[tx] += d_s[tx + 256];
__syncthreads();
}
if (BLOCKSIZE >= 256)
{
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
}
if (BLOCKSIZE >= 128)
{
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
}
if (tx < 32) warpReduce(d_s, tx);
if (tx == 0)
{
dst[bx] = d_s[0];
}
//if (tx == 0 && bx < 153)
// printf("%f\n", dst[bx]);
}
__global__ void kernel_3(DATA_TYPE *d_D, DATA_TYPE *d_B, unsigned int size_d)
{
int tx = threadIdx.x;
//printf("d_D: %d, %f\n", tx, d_D[tx]);
__shared__ DATA_TYPE d_s[BLOCKSIZE_3];
d_s[tx] = 0;
if (tx< size_d)
d_s[tx] = d_D[tx];
__syncthreads();
//printf("d_s: %d: %f\n",tx, d_s[tx]);
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
if (tx < 32) warpReduce(d_s, tx);
if (tx == 0)
{
d_B[0] = d_s[0];
//printf("%f\n", d_s[0]);
//printf("%f\n", d_B[0]);
}
}
void kernel_call(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int size_A)
{
DATA_TYPE *d_C;
DATA_TYPE *d_D;
dim3 block(BLOCKSIZE);
dim3 grid((N + block.x - 1) / block.x);
unsigned int size_C = grid.x;
unsigned int SIZEC = size_C * sizeof(DATA_TYPE);
dim3 block_2(BLOCKSIZE);
dim3 grid_2((size_C + block_2.x-1) / block_2.x);
unsigned int size_D = grid_2.x;
unsigned int SIZED = size_D * sizeof(DATA_TYPE);
CHECK(cudaMalloc((void**)&d_C, SIZEC));
CHECK(cudaMemset(d_C, 0, SIZEC));
CHECK(cudaMalloc((void**)&d_D, SIZED));
CHECK(cudaMemset(d_D, 0, SIZED));
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
cudaEventQuery(start);
//printf("size_A=%d\t ", size_A); 1000 0000
//printf("size_C=%d\t ", size_C); 39063
//printf("size_D=%d\t ", size_D); 153
//kernel<<<grid,block>>>(d_A, d_C, size_A);
kernel_2 << <grid, block >> > (d_A, d_C, N);
CHECK(cudaGetLastError());
kernel_2 << <grid_2, block_2 >> > (d_C, d_D, size_C);
CHECK(cudaGetLastError());
kernel_3 << <1, BLOCKSIZE_3 >> > (d_D, d_B, size_D);
CHECK(cudaGetLastError());
CHECK(cudaDeviceSynchronize()); //cudaThreadSynchronize() is deprecated
//time end
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(cudaEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel time=%f ms\n", elapsedTime_cuda);
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
//DATA_TYPE *h_C = (DATA_TYPE*)malloc(SIZEC);
//CHECK(cudaMemcpy(h_C, d_C, SIZEC, cudaMemcpyDefault));
//reduce_2_cpu(h_C, size_C);
cudaFree(d_C);
cudaFree(d_D);
}
int main()
{
unsigned int SIZEA = N * sizeof(DATA_TYPE);
int SIZEB = 1 * sizeof(DATA_TYPE);
printf("Allocate %e MB on CPU\n", SIZEA / (1024.f*1024.f));
DATA_TYPE *h_A = (DATA_TYPE*)malloc(SIZEA);
DATA_TYPE *h_B = (DATA_TYPE*)malloc(SIZEB);
if (h_A == NULL)
printf("Failed to allocate CPU memory - h_A\n");
memset(h_A, 0, SIZEA);
memset(h_B, 0, SIZEB);
InitializeArray(h_A, N);
//timer
LARGE_INTEGER nFreq;
LARGE_INTEGER nBeginTime;
LARGE_INTEGER nEndTime;
double host_time;
QueryPerformanceFrequency(&nFreq);
QueryPerformanceCounter(&nBeginTime);
DATA_TYPE h_sum =ReductHost(h_A, N);
printf("CPU result:%f\t", h_sum);
QueryPerformanceCounter(&nEndTime);
host_time = (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart;
printf("CPU time: %f ms\n", host_time*1000);
//GPU
DATA_TYPE *d_A;
DATA_TYPE *d_B;
CHECK(cudaMalloc((void**)&d_A, SIZEA));
CHECK(cudaMalloc((void**)&d_B, SIZEB));
CHECK(cudaMemcpy(d_A, h_A, SIZEA, cudaMemcpyDefault));
//timer
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
cudaEventQuery(start);
kernel_call(d_A, d_B, N);
//time end
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsedTime;
CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
CHECK(cudaMemcpy(h_B, d_B, SIZEB, cudaMemcpyDefault));
printf("GPU result:%f\t", h_B[0]);
printf("GPU time=%f ms\n", elapsedTime);
printf("Speed up=%f \n", (host_time * 1000)/elapsedTime);
free(h_A);
free(h_B);
cudaFree(d_A);
cudaFree(d_B);
return 0;
}
void InitializeArray(DATA_TYPE *array, unsigned int size)
{
int i;
for (i = 0; i < size; i++)
{
array[i] = 1.0;
}
}
DATA_TYPE ReductHost(DATA_TYPE *array, unsigned int size)
{
unsigned int i;
double result = 0;
for (i = 0; i < size; i++)
result += array[i];
DATA_TYPE result_1 = (DATA_TYPE)result;
return result_1;
}
|
c94d088ca3d6aa17a15d51fa0f27fcd9d2490aa2.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "orc.h"
#include "orc_gpu.h"
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <nvcomp/snappy.h>
#include <algorithm>
#include <iterator>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType& schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL: return (decimals_as_float64) ? type_id::FLOAT64 : type_id::DECIMAL64;
// Need to update once cuDF plans to support map type
case orc::MAP:
case orc::LIST: return type_id::LIST;
case orc::STRUCT: return type_id::STRUCT;
default: break;
}
return type_id::EMPTY;
}
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
/**
* @brief struct to store buffer data and size of list buffer
*/
struct list_buffer_data {
size_type* data;
size_type size;
};
// Generates offsets for list buffer from number of elements in a row.
void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data,
rmm::cuda_stream_view stream)
{
auto transformer = [] __device__(list_buffer_data list_data) {
thrust::exclusive_scan(
thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data);
};
thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer);
stream.synchronize();
}
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
size_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation* stripeinfo,
const orc::StripeFooter* stripefooter,
const std::vector<int>& orc2gdf,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t* num_dictionary_entries,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
std::vector<orc_stream_info>& stream_info,
bool apply_struct_map)
{
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto& stream : stripefooter->streams) {
if (!stream.column_id || *stream.column_id >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto const column_id = *stream.column_id;
auto col = orc2gdf[column_id];
if (col == -1 and apply_struct_map) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[column_id];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto& idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto& chunk = chunks[stripe_index][col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto& chunk = chunks[stripe_index][col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[column_id].dictionarySize;
*num_dictionary_entries += stripefooter->columns[column_id].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
/**
* @brief Determines if a column should be converted from decimal to float
*/
bool should_convert_decimal_column_to_float(const std::vector<std::string>& columns_to_convert,
cudf::io::orc::metadata& metadata,
int column_index)
{
return (std::find(columns_to_convert.begin(),
columns_to_convert.end(),
metadata.column_name(column_index)) != columns_to_convert.end());
}
} // namespace
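/**
* @brief Decompress a batch of Snappy-compressed blocks with nvCOMP's batched API and record the
* bytes written and status for each block in comp_stat.
*/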
void snappy_decompress(device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat,
size_t max_uncomp_page_size,
rmm::cuda_stream_view stream)
{
size_t num_blocks = comp_in.size();
size_t temp_size;
auto status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size);
CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream);
rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
// Prepare the vectors
auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(),
compressed_data_sizes.begin(),
uncompressed_data_ptrs.begin(),
uncompressed_data_sizes.data());
thrust::transform(rmm::exec_policy(stream),
comp_in.begin(),
comp_in.end(),
comp_it,
[] __device__(gpu_inflate_input_s in) {
return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize);
});
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(),
compressed_data_sizes.data(),
uncompressed_data_sizes.data(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream.value());
CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
num_blocks,
[=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) {
comp_stat[i].bytes_written = actual_uncomp_sizes[i];
comp_stat[i].status = 0;
});
}
rmm::device_buffer reader::impl::decompress_stripe_data(
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
const std::vector<rmm::device_buffer>& stripe_data,
const OrcDecompressor* decompressor,
std::vector<orc_stream_info>& stream_info,
size_t num_stripes,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
size_t row_index_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto& info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host(stream, true);
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_uvector<gpu_inflate_input_s> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t max_uncomp_block_size = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t*>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data() + start_pos;
compinfo[i].decstatus = inflate_out.data() + start_pos;
compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
max_uncomp_block_size =
std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size);
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(
gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
device_span<gpu_inflate_input_s> inflate_in_view{inflate_in.data(),
num_compressed_blocks};
device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(),
num_compressed_blocks};
snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream);
} else {
CUDA_TRY(
gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream));
}
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
compinfo.device_to_host(stream, true);
const size_t num_columns = chunks.size().second;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto& chunk = chunks[i][j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (row_groups.size().first) {
chunks.host_to_device(stream);
row_groups.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
compinfo.device_ptr(),
chunks.base_device_ptr(),
num_columns,
num_stripes,
row_groups.size().first,
row_index_stride,
use_base_stride,
stream);
}
return decomp_data;
}
/**
* @brief Updates null mask of columns whose parent is a struct column.
* If struct column has null element, that row would be
* skipped while writing child column in ORC, so we need to insert the missing null
* elements in child column.
* There is another behavior from pyspark, where if the child column doesn't have any null
* elements, it will not have present stream, so in that case parent null mask need to be
* copied to child column.
*
* @param chunks Vector of list of column chunk descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource to use for device memory allocation
*/
void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
std::vector<column_buffer>& out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const auto num_stripes = chunks.size().first;
const auto num_columns = chunks.size().second;
bool is_mask_updated = false;
for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) {
if (not is_mask_updated) {
chunks.device_to_host(stream, true);
is_mask_updated = true;
}
auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base;
auto child_valid_map_base = out_buffers[col_idx].null_mask();
auto child_mask_len =
chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count;
auto parent_mask_len = chunks[0][col_idx].column_num_rows;
if (child_valid_map_base != nullptr) {
rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream);
// Copy indexes at which the parent has valid value.
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + parent_mask_len,
dst_idx.begin(),
[parent_valid_map_base] __device__(auto idx) {
return bit_is_set(parent_valid_map_base, idx);
});
auto merged_null_mask = cudf::detail::create_null_mask(
parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr);
auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data());
uint32_t* dst_idx_ptr = dst_idx.data();
// Copy child valid bits from child column to valid indexes, this will merge both child and
// parent null masks
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + dst_idx.size(),
[child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) {
if (bit_is_set(child_valid_map_base, idx)) {
cudf::set_bit(merged_mask, dst_idx_ptr[idx]);
};
});
out_buffers[col_idx]._null_mask = std::move(merged_null_mask);
} else {
// Since child column doesn't have a mask, copy parent null mask
auto mask_size = bitmask_allocation_size_bytes(parent_mask_len);
out_buffers[col_idx]._null_mask =
rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr);
}
}
}
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
if (is_mask_updated) {
// Update chunks with pointers to column data which might have been changed.
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
chunks.host_to_device(stream, true);
}
}
/**
 * @brief Computes the per-stripe prefix sum of null counts for each struct column in the
 * current layer.
*/
void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks,
cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums,
rmm::cuda_stream_view stream)
{
auto const num_stripes = chunks.size().first;
if (num_stripes == 0) return;
auto const num_columns = chunks.size().second;
std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update;
for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) {
// Null counts sums are only needed for children of struct columns
if (chunks[0][col_idx].type_kind == STRUCT) {
prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]);
}
}
auto const d_prefix_sums_to_update =
cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream);
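  // For each tracked struct column, gather its per-stripe null counts on the device and
  // turn them into an inclusive prefix sum; child chunks reference these sums through
  // parent_null_count_prefix_sums when decoding.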
thrust::for_each(rmm::exec_policy(stream),
d_prefix_sums_to_update.begin(),
d_prefix_sums_to_update.end(),
[chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__(
auto const& idx_psums) {
auto const col_idx = idx_psums.first;
auto const psums = idx_psums.second;
thrust::transform(
thrust::seq,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + psums.size(),
psums.begin(),
[&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; });
thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin());
});
  // `prefix_sums_to_update` goes out of scope at the end of this function, so the async copy
  // to the device must complete before we return
stream.synchronize();
}
void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
size_t num_dicts,
size_t skip_rows,
timezone_table_view tz_table,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
size_t row_index_stride,
std::vector<column_buffer>& out_buffers,
size_t level,
rmm::cuda_stream_view stream)
{
const auto num_stripes = chunks.size().first;
const auto num_columns = chunks.size().second;
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
// Update chunks with pointers to column data
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.column_data_base = out_buffers[col_idx].data();
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
chunks.host_to_device(stream, true);
gpu::DecodeNullsAndStringDictionaries(
chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream);
if (level > 0) {
// Update nullmasks for children if parent was a struct and had null mask
update_null_mask(chunks, out_buffers, stream, _mr);
}
// Update the null map for child columns
gpu::DecodeOrcColumnData(chunks.base_device_ptr(),
global_dict.data(),
row_groups,
num_columns,
num_stripes,
skip_rows,
tz_table,
row_groups.size().first,
row_index_stride,
level,
stream);
chunks.device_to_host(stream, true);
std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) {
out_buffers[col_idx].null_count() =
std::accumulate(stripe_idx_it + 0,
stripe_idx_it + num_stripes,
0,
[&](auto null_count, auto const stripe_idx) {
return null_count + chunks[stripe_idx][col_idx].null_count;
});
});
}
// Aggregate child column metadata per stripe and per column
void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks,
cudf::detail::host_2dspan<gpu::RowGroup> row_groups,
std::vector<column_buffer>& out_buffers,
std::vector<orc_column_meta> const& list_col,
const size_type level)
{
const auto num_of_stripes = chunks.size().first;
const auto num_of_rowgroups = row_groups.size().first;
const auto num_parent_cols = selected_columns.levels[level].size();
const auto num_child_cols = selected_columns.levels[level + 1].size();
const auto number_of_child_chunks = num_child_cols * num_of_stripes;
auto& num_child_rows = _col_meta.num_child_rows;
auto& parent_column_data = _col_meta.parent_column_data;
// Reset the meta to store child column details.
num_child_rows.resize(selected_columns.levels[level + 1].size());
std::fill(num_child_rows.begin(), num_child_rows.end(), 0);
parent_column_data.resize(number_of_child_chunks);
_col_meta.parent_column_index.resize(number_of_child_chunks);
_col_meta.child_start_row.resize(number_of_child_chunks);
_col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks);
_col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols);
auto child_start_row = cudf::detail::host_2dspan<uint32_t>(
_col_meta.child_start_row.data(), num_of_stripes, num_child_cols);
auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>(
_col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols);
auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>(
_col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols);
int index = 0; // number of child column processed
// For each parent column, update its child column meta for each stripe.
std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) {
const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id];
auto start_row = 0;
auto processed_row_groups = 0;
for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) {
// Aggregate num_rows and start_row from processed parent columns per row groups
if (num_of_rowgroups) {
auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups;
auto processed_child_rows = 0;
for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups;
rowgroup_id++, processed_row_groups++) {
const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows;
rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows;
}
processed_child_rows += child_rows;
}
}
// Aggregate start row, number of rows per chunk and total number of rows in a column
const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
num_child_rows[child_col_idx] += child_rows;
num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows;
// start row could be different for each column when there is nesting at each stripe level
child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row;
}
start_row += child_rows;
}
    // The parent column's null mask and null count are required for the child column
    // to adjust its null mask.
auto type = out_buffers[parent_col_idx].type.id();
auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
auto parent_valid_map = out_buffers[parent_col_idx].null_mask();
auto num_rows = out_buffers[parent_col_idx].size;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
_col_meta.parent_column_index[child_col_idx] = parent_col_idx;
if (type == type_id::STRUCT) {
parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
// Number of rows in child will remain same as parent in case of struct column
num_child_rows[child_col_idx] = num_rows;
} else {
parent_column_data[child_col_idx] = {nullptr, 0};
}
}
index += p_col.num_children;
});
}
std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; }
std::unique_ptr<column> reader::impl::create_empty_column(const size_type orc_col_id,
column_name_info& schema_info,
rmm::cuda_stream_view stream)
{
schema_info.name = _metadata.column_name(0, orc_col_id);
  // If the column type is orc::DECIMAL, check whether the user wants it converted to float64
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], orc_col_id);
auto const type = to_type_id(
_metadata.get_schema(orc_col_id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64);
int32_t scale = 0;
std::vector<std::unique_ptr<column>> child_columns;
std::unique_ptr<column> out_col = nullptr;
auto kind = _metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("");
out_col = make_lists_column(
0,
make_empty_column(type_id::INT32),
create_empty_column(
_metadata.get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream),
0,
rmm::device_buffer{0, stream},
stream);
break;
case orc::MAP: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("struct");
const auto child_column_ids = _metadata.get_col_type(orc_col_id).subtypes;
for (size_t idx = 0; idx < _metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
auto& children_schema = schema_info.children.back().children;
children_schema.emplace_back("");
child_columns.push_back(create_empty_column(
child_column_ids[idx], schema_info.children.back().children.back(), stream));
auto name = get_map_child_col_name(idx);
children_schema[idx].name = name;
}
auto struct_col =
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
out_col = make_lists_column(0,
make_empty_column(type_id::INT32),
std::move(struct_col),
0,
rmm::device_buffer{0, stream},
stream);
} break;
case orc::STRUCT:
for (const auto col : _metadata.get_col_type(orc_col_id).subtypes) {
schema_info.children.emplace_back("");
child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream));
}
out_col =
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
break;
case orc::DECIMAL:
if (type == type_id::DECIMAL64) {
scale = -static_cast<int32_t>(_metadata.get_types()[orc_col_id].scale.value_or(0));
}
out_col = make_empty_column(data_type(type, scale));
break;
default: out_col = make_empty_column(type);
}
return out_col;
}
// Adds child column buffers to parent column
column_buffer&& reader::impl::assemble_buffer(const size_type orc_col_id,
std::vector<std::vector<column_buffer>>& col_buffers,
const size_t level,
rmm::cuda_stream_view stream)
{
auto const col_id = _col_meta.orc_col_map[level][orc_col_id];
auto& col_buffer = col_buffers[level][col_id];
col_buffer.name = _metadata.column_name(0, orc_col_id);
auto kind = _metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
case orc::STRUCT:
for (auto const& col : selected_columns.children[orc_col_id]) {
col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
}
break;
case orc::MAP: {
std::vector<column_buffer> child_col_buffers;
// Get child buffers
for (size_t idx = 0; idx < selected_columns.children[orc_col_id].size(); idx++) {
auto name = get_map_child_col_name(idx);
auto col = selected_columns.children[orc_col_id][idx];
child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
child_col_buffers.back().name = name;
}
// Create a struct buffer
auto num_rows = child_col_buffers[0].size;
auto struct_buffer =
column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr);
struct_buffer.children = std::move(child_col_buffers);
struct_buffer.name = "struct";
col_buffer.children.emplace_back(std::move(struct_buffer));
} break;
default: break;
}
return std::move(col_buffer);
}
// creates columns along with schema information for each column
void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers,
std::vector<std::unique_ptr<column>>& out_columns,
std::vector<column_name_info>& schema_info,
rmm::cuda_stream_view stream)
{
std::transform(selected_columns.levels[0].begin(),
selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
schema_info.emplace_back("");
auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream);
return make_column(col_buffer, &schema_info.back(), stream, _mr);
});
}
reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
orc_reader_options const& options,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
_sources(std::move(sources)),
_metadata{_sources},
selected_columns{_metadata.select_columns(options.get_columns())}
{
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimal_cols_as_float = options.get_decimal_cols_as_float();
}
timezone_table reader::impl::compute_timezone_table(
const std::vector<cudf::io::orc::metadata::stripe_source_mapping>& selected_stripes,
rmm::cuda_stream_view stream)
{
if (selected_stripes.empty()) return {};
auto const has_timestamp_column = std::any_of(
selected_columns.levels.cbegin(), selected_columns.levels.cend(), [&](auto& col_lvl) {
return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto& col_meta) {
return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP;
});
});
if (not has_timestamp_column) return {};
return build_timezone_transition_table(selected_stripes[0].stripe_info[0].second->writerTimezone,
stream);
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<std::vector<size_type>>& stripes,
rmm::cuda_stream_view stream)
{
// Selected columns at different levels of nesting are stored in different elements
// of `selected_columns`; thus, size == 1 means no nested columns
CUDF_EXPECTS(skip_rows == 0 or selected_columns.num_levels() == 1,
"skip_rows is not supported by nested columns");
std::vector<std::unique_ptr<column>> out_columns;
// buffer and stripe data are stored as per nesting level
std::vector<std::vector<column_buffer>> out_buffers(selected_columns.num_levels());
std::vector<column_name_info> schema_info;
std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(selected_columns.num_levels());
std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums;
table_metadata out_metadata;
// There are no columns in the table
if (selected_columns.num_levels() == 0)
return {std::make_unique<table>(), std::move(out_metadata)};
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata.select_stripes(stripes, skip_rows, num_rows);
auto const tz_table = compute_timezone_table(selected_stripes, stream);
// Iterates through levels of nested columns, child column will be one level down
// compared to parent column.
for (size_t level = 0; level < selected_columns.num_levels(); level++) {
auto& columns_level = selected_columns.levels[level];
// Association between each ORC column and its cudf::column
_col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1);
std::vector<orc_column_meta> nested_col;
bool is_data_empty = false;
// Get a list of column data types
std::vector<data_type> column_types;
for (auto& col : columns_level) {
      // If the column type is orc::DECIMAL, check whether the user wants it converted to float64
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], col.id);
auto col_type = to_type_id(
_metadata.get_col_type(col.id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
// Remove this once we support Decimal128 data type
CUDF_EXPECTS(
(col_type != type_id::DECIMAL64) or (_metadata.get_col_type(col.id).precision <= 18),
"Decimal data has precision > 18, Decimal64 data type doesn't support it.");
if (col_type == type_id::DECIMAL64) {
        // The sign of the scale is flipped since cuDF follows C++ libraries like CNL,
        // which use negative scaling, whereas liborc and other libraries use positive
        // scaling.
auto const scale =
-static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0));
column_types.emplace_back(col_type, scale);
} else {
column_types.emplace_back(col_type);
}
      // Map each ORC column to its cudf column
_col_meta.orc_col_map[level][col.id] = column_types.size() - 1;
// TODO: Once MAP type is supported in cuDF, update this for MAP as well
if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col);
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(selected_columns.levels[0].begin(),
selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
schema_info.emplace_back("");
return create_empty_column(col_meta.id, schema_info.back(), stream);
});
break;
} else {
// Get the total number of stripes across all input files.
size_t total_num_stripes =
std::accumulate(selected_stripes.begin(),
selected_stripes.end(),
0,
[](size_t sum, auto& stripe_source_mapping) {
return sum + stripe_source_mapping.stripe_info.size();
});
const auto num_columns = columns_level.size();
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks(
total_num_stripes, num_columns, stream);
memset(chunks.base_host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Do stripes have row group index
_metadata.is_row_grp_idx_present() &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) &&
_metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
null_count_prefix_sums.emplace_back();
null_count_prefix_sums.back().reserve(selected_columns.levels[level].size());
std::generate_n(std::back_inserter(null_count_prefix_sums.back()),
selected_columns.levels[level].size(),
[&]() {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
total_num_stripes, stream);
});
// Tracker for eventually deallocating compressed and uncompressed data
auto& stripe_data = lvl_stripe_data[level];
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
int stripe_idx = 0;
std::vector<std::pair<std::future<size_t>, size_t>> read_tasks;
for (auto const& stripe_source_mapping : selected_stripes) {
// Iterate through the source files selected stripes
for (auto const& stripe : stripe_source_mapping.stripe_info) {
const auto stripe_info = stripe.first;
const auto stripe_footer = stripe.second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(stripe_idx,
stripe_info,
stripe_footer,
_col_meta.orc_col_map[level],
_metadata.get_types(),
use_index,
&num_dict_entries,
chunks,
stream_info,
level == 0);
if (total_data_size == 0) {
CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data");
// In case ROW GROUP INDEX is not present and all columns are structs with no null
// stream, there is nothing to read at this level.
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; };
CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype),
"Expected streams data within stripe");
is_data_empty = true;
}
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t*>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (not is_data_empty and stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
if (_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->is_device_read_preferred(len)) {
read_tasks.push_back(
std::make_pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->device_read_async(offset, len, d_dst, stream),
len));
} else {
const auto buffer =
_metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read(
offset, len);
CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read.");
CUDA_TRY(hipMemcpyAsync(
d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
}
const auto num_rows_per_stripe = stripe_info->numberOfRows;
const auto rowgroup_id = num_rowgroups;
auto stripe_num_rowgroups = 0;
if (use_index) {
stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) /
_metadata.get_row_index_stride();
}
// Update chunks to reference streams pointers
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto& chunk = chunks[stripe_idx][col_idx];
          // The start row, the number of rows in each stripe, and the total number of rows
// may change in lower levels of nesting
chunk.start_row = (level == 0)
? stripe_start_row
: _col_meta.child_start_row[stripe_idx * num_columns + col_idx];
chunk.num_rows =
(level == 0)
? stripe_info->numberOfRows
: _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx];
chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx];
chunk.parent_validity_info =
(level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx];
chunk.parent_null_count_prefix_sums =
(level == 0)
? nullptr
: null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]].data();
chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind;
chunk.type_kind = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.kind;
          // num_child_rows for a struct column is the same as the parent's; for other nested
          // types it is calculated later.
chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows;
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], columns_level[col_idx].id);
chunk.decimal_scale = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.scale.value_or(0) |
(decimal_as_float64 ? orc::gpu::orc_decimal2float64_scale : 0);
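          // Assumption: orc_decimal2float64_scale acts as a flag bit OR-ed into the scale so
          // the GPU decoder knows to emit float64 instead of DECIMAL64 for this column.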
chunk.rowgroup_id = rowgroup_id;
chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING)
? sizeof(string_index_pair)
: ((column_types[col_idx].id() == type_id::LIST) or
(column_types[col_idx].id() == type_id::STRUCT))
? sizeof(size_type)
: cudf::size_of(column_types[col_idx]);
chunk.num_rowgroups = stripe_num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.timestamp_type_id = _timestamp_type.id();
}
if (not is_data_empty) {
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += num_rows_per_stripe;
num_rowgroups += stripe_num_rowgroups;
stripe_idx++;
}
}
for (auto& task : read_tasks) {
CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read.");
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
auto row_groups =
cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream);
if (level > 0 and row_groups.size().first) {
cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(),
num_rowgroups * num_columns);
auto& rw_grp_meta = _col_meta.rwgrp_meta;
// Update start row and num rows per row group
std::transform(rw_grp_meta.begin(),
rw_grp_meta.end(),
row_groups_span.begin(),
rw_grp_meta.begin(),
[&](auto meta, auto& row_grp) {
row_grp.num_rows = meta.num_rows;
row_grp.start_row = meta.start_row;
return meta;
});
}
// Setup row group descriptors if using indexes
if (_metadata.per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) {
auto decomp_data =
decompress_stripe_data(chunks,
stripe_data,
_metadata.per_file_metadata[0].decompressor.get(),
stream_info,
total_num_stripes,
row_groups,
_metadata.get_row_index_stride(),
level == 0,
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (row_groups.size().first) {
chunks.host_to_device(stream);
row_groups.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
nullptr,
chunks.base_device_ptr(),
num_columns,
total_num_stripes,
num_rowgroups,
_metadata.get_row_index_stride(),
level == 0,
stream);
}
}
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < total_num_stripes; ++j) {
if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
auto is_list_type = (column_types[i].id() == type_id::LIST);
auto n_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[i];
        // For list columns, the offsets column always has size + 1 elements
if (is_list_type) n_rows++;
out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr);
}
if (not is_data_empty) {
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
tz_table.view(),
row_groups,
_metadata.get_row_index_stride(),
out_buffers[level],
level,
stream);
}
// Extract information to process nested child columns
if (nested_col.size()) {
if (not is_data_empty) {
scan_null_counts(chunks, null_count_prefix_sums[level], stream);
}
row_groups.device_to_host(stream, true);
aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level);
}
// ORC stores number of elements at each row, so we need to generate offsets from that
if (nested_col.size()) {
std::vector<list_buffer_data> buff_data;
std::for_each(
out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) {
if (out_buffer.type.id() == type_id::LIST) {
auto data = static_cast<size_type*>(out_buffer.data());
buff_data.emplace_back(list_buffer_data{data, out_buffer.size});
}
});
if (buff_data.size()) {
auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream);
generate_offsets_for_list(dev_buff_data, stream);
}
}
}
}
}
// If out_columns is empty, then create columns from buffer.
if (out_columns.empty()) {
create_columns(std::move(out_buffers), out_columns, schema_info, stream);
}
// Return column names (must match order of returned columns)
out_metadata.column_names.reserve(schema_info.size());
std::transform(schema_info.cbegin(),
schema_info.cend(),
std::back_inserter(out_metadata.column_names),
[](auto info) { return info.name; });
out_metadata.schema_info = std::move(schema_info);
for (const auto& meta : _metadata.per_file_metadata) {
for (const auto& kv : meta.ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
_impl = std::make_unique<impl>(std::move(sources), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
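// Minimal usage sketch (assumes the public cudf::io::read_orc API; file path and column
// name below are hypothetical):
//   auto opts = cudf::io::orc_reader_options::builder(cudf::io::source_info{"example.orc"})
//                 .columns({"col_a"})
//                 .build();
//   auto result = cudf::io::read_orc(opts);
//   // result.tbl holds the table, result.metadata the column names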
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
c94d088ca3d6aa17a15d51fa0f27fcd9d2490aa2.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "orc.h"
#include "orc_gpu.h"
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <nvcomp/snappy.h>
#include <algorithm>
#include <iterator>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType& schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL: return (decimals_as_float64) ? type_id::FLOAT64 : type_id::DECIMAL64;
    // Needs to be updated once cuDF supports the MAP type
case orc::MAP:
case orc::LIST: return type_id::LIST;
case orc::STRUCT: return type_id::STRUCT;
default: break;
}
return type_id::EMPTY;
}
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
/**
* @brief struct to store buffer data and size of list buffer
*/
struct list_buffer_data {
size_type* data;
size_type size;
};
// Generates offsets for list buffer from number of elements in a row.
void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data,
rmm::cuda_stream_view stream)
{
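  // Each list buffer is handled by a single device thread: an in-place exclusive scan
  // (thrust::seq) converts the per-row element counts into offsets.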
auto transformer = [] __device__(list_buffer_data list_data) {
thrust::exclusive_scan(
thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data);
};
thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer);
stream.synchronize();
}
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
size_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
 * @brief Function that populates column descriptors' stream/chunk information
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation* stripeinfo,
const orc::StripeFooter* stripefooter,
const std::vector<int>& orc2gdf,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t* num_dictionary_entries,
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
std::vector<orc_stream_info>& stream_info,
bool apply_struct_map)
{
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto& stream : stripefooter->streams) {
if (!stream.column_id || *stream.column_id >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto const column_id = *stream.column_id;
auto col = orc2gdf[column_id];
if (col == -1 and apply_struct_map) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[column_id];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto& idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto& chunk = chunks[stripe_index][col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto& chunk = chunks[stripe_index][col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[column_id].dictionarySize;
*num_dictionary_entries += stripefooter->columns[column_id].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
/**
* @brief Determines if a column should be converted from decimal to float
*/
bool should_convert_decimal_column_to_float(const std::vector<std::string>& columns_to_convert,
cudf::io::orc::metadata& metadata,
int column_index)
{
return (std::find(columns_to_convert.begin(),
columns_to_convert.end(),
metadata.column_name(column_index)) != columns_to_convert.end());
}
} // namespace
void snappy_decompress(device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat,
size_t max_uncomp_page_size,
rmm::cuda_stream_view stream)
{
size_t num_blocks = comp_in.size();
size_t temp_size;
auto status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size);
CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream);
rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
// Prepare the vectors
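  // nvcomp's batched API expects structure-of-arrays inputs, so the per-block
  // gpu_inflate_input_s entries are transposed into the separate device vectors above.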
auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(),
compressed_data_sizes.begin(),
uncompressed_data_ptrs.begin(),
uncompressed_data_sizes.data());
thrust::transform(rmm::exec_policy(stream),
comp_in.begin(),
comp_in.end(),
comp_it,
[] __device__(gpu_inflate_input_s in) {
return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize);
});
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(),
compressed_data_sizes.data(),
uncompressed_data_sizes.data(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream.value());
CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
num_blocks,
[=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) {
comp_stat[i].bytes_written = actual_uncomp_sizes[i];
comp_stat[i].status = 0;
});
}
rmm::device_buffer reader::impl::decompress_stripe_data(
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
const std::vector<rmm::device_buffer>& stripe_data,
const OrcDecompressor* decompressor,
std::vector<orc_stream_info>& stream_info,
size_t num_stripes,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
size_t row_index_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto& info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host(stream, true);
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_uvector<gpu_inflate_input_s> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t max_uncomp_block_size = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t*>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data() + start_pos;
compinfo[i].decstatus = inflate_out.data() + start_pos;
compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
max_uncomp_block_size =
std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size);
}
compinfo.host_to_device(stream);
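  // Second parse pass: with decctl/decstatus/copyctl now pointing into the batch buffers,
  // the kernel fills in the per-block decompression descriptors.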
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(
gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
device_span<gpu_inflate_input_s> inflate_in_view{inflate_in.data(),
num_compressed_blocks};
device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(),
num_compressed_blocks};
snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream);
} else {
CUDA_TRY(
gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream));
}
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
compinfo.device_to_host(stream, true);
const size_t num_columns = chunks.size().second;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto& chunk = chunks[i][j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (row_groups.size().first) {
chunks.host_to_device(stream);
row_groups.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
compinfo.device_ptr(),
chunks.base_device_ptr(),
num_columns,
num_stripes,
row_groups.size().first,
row_index_stride,
use_base_stride,
stream);
}
return decomp_data;
}
/**
 * @brief Updates the null mask of columns whose parent is a struct column.
 * If a struct column has a null element, that row is skipped when the child column is
 * written in ORC, so the missing null elements need to be inserted back into the child
 * column.
 * Pyspark exhibits another behavior: if the child column does not contain any null
 * elements, it has no PRESENT stream, in which case the parent null mask needs to be
 * copied to the child column.
*
* @param chunks Vector of list of column chunk descriptors
* @param out_buffers Output columns' device buffers
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource to use for device memory allocation
*/
void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
std::vector<column_buffer>& out_buffers,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const auto num_stripes = chunks.size().first;
const auto num_columns = chunks.size().second;
bool is_mask_updated = false;
for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) {
if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) {
if (not is_mask_updated) {
chunks.device_to_host(stream, true);
is_mask_updated = true;
}
auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base;
auto child_valid_map_base = out_buffers[col_idx].null_mask();
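      // Rows where the parent is null are not written for the child column, so the child
      // mask only covers column_num_rows minus the parent's null count.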
auto child_mask_len =
chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count;
auto parent_mask_len = chunks[0][col_idx].column_num_rows;
if (child_valid_map_base != nullptr) {
rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream);
// Copy indexes at which the parent has valid value.
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + parent_mask_len,
dst_idx.begin(),
[parent_valid_map_base] __device__(auto idx) {
return bit_is_set(parent_valid_map_base, idx);
});
auto merged_null_mask = cudf::detail::create_null_mask(
parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr);
auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data());
uint32_t* dst_idx_ptr = dst_idx.data();
// Copy child valid bits from child column to valid indexes, this will merge both child and
// parent null masks
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + dst_idx.size(),
[child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) {
if (bit_is_set(child_valid_map_base, idx)) {
cudf::set_bit(merged_mask, dst_idx_ptr[idx]);
};
});
out_buffers[col_idx]._null_mask = std::move(merged_null_mask);
} else {
// Since child column doesn't have a mask, copy parent null mask
auto mask_size = bitmask_allocation_size_bytes(parent_mask_len);
out_buffers[col_idx]._null_mask =
rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr);
}
}
}
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
if (is_mask_updated) {
// Update chunks with pointers to column data which might have been changed.
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
chunks.host_to_device(stream, true);
}
}
/**
 * @brief Computes the per-stripe prefix sum of null counts for each struct column in the
 * current layer.
*/
void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks,
cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums,
rmm::cuda_stream_view stream)
{
auto const num_stripes = chunks.size().first;
if (num_stripes == 0) return;
auto const num_columns = chunks.size().second;
std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update;
for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) {
// Null counts sums are only needed for children of struct columns
if (chunks[0][col_idx].type_kind == STRUCT) {
prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]);
}
}
auto const d_prefix_sums_to_update =
cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream);
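  // For each tracked struct column, gather its per-stripe null counts on the device and
  // turn them into an inclusive prefix sum; child chunks reference these sums through
  // parent_null_count_prefix_sums when decoding.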
thrust::for_each(rmm::exec_policy(stream),
d_prefix_sums_to_update.begin(),
d_prefix_sums_to_update.end(),
[chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__(
auto const& idx_psums) {
auto const col_idx = idx_psums.first;
auto const psums = idx_psums.second;
thrust::transform(
thrust::seq,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + psums.size(),
psums.begin(),
[&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; });
thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin());
});
  // `prefix_sums_to_update` goes out of scope at the end of this function, so the async copy
  // to the device must complete before we return
stream.synchronize();
}
void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks,
size_t num_dicts,
size_t skip_rows,
timezone_table_view tz_table,
cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups,
size_t row_index_stride,
std::vector<column_buffer>& out_buffers,
size_t level,
rmm::cuda_stream_view stream)
{
const auto num_stripes = chunks.size().first;
const auto num_columns = chunks.size().second;
thrust::counting_iterator<int> col_idx_it(0);
thrust::counting_iterator<int> stripe_idx_it(0);
// Update chunks with pointers to column data
std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) {
std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) {
auto& chunk = chunks[stripe_idx][col_idx];
chunk.column_data_base = out_buffers[col_idx].data();
chunk.valid_map_base = out_buffers[col_idx].null_mask();
});
});
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
chunks.host_to_device(stream, true);
gpu::DecodeNullsAndStringDictionaries(
chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream);
if (level > 0) {
// Update nullmasks for children if parent was a struct and had null mask
update_null_mask(chunks, out_buffers, stream, _mr);
}
// Update the null map for child columns
gpu::DecodeOrcColumnData(chunks.base_device_ptr(),
global_dict.data(),
row_groups,
num_columns,
num_stripes,
skip_rows,
tz_table,
row_groups.size().first,
row_index_stride,
level,
stream);
chunks.device_to_host(stream, true);
std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) {
out_buffers[col_idx].null_count() =
std::accumulate(stripe_idx_it + 0,
stripe_idx_it + num_stripes,
0,
[&](auto null_count, auto const stripe_idx) {
return null_count + chunks[stripe_idx][col_idx].null_count;
});
});
}
// Aggregate child column metadata per stripe and per column
void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks,
cudf::detail::host_2dspan<gpu::RowGroup> row_groups,
std::vector<column_buffer>& out_buffers,
std::vector<orc_column_meta> const& list_col,
const size_type level)
{
const auto num_of_stripes = chunks.size().first;
const auto num_of_rowgroups = row_groups.size().first;
const auto num_parent_cols = selected_columns.levels[level].size();
const auto num_child_cols = selected_columns.levels[level + 1].size();
const auto number_of_child_chunks = num_child_cols * num_of_stripes;
auto& num_child_rows = _col_meta.num_child_rows;
auto& parent_column_data = _col_meta.parent_column_data;
// Reset the meta to store child column details.
num_child_rows.resize(selected_columns.levels[level + 1].size());
std::fill(num_child_rows.begin(), num_child_rows.end(), 0);
parent_column_data.resize(number_of_child_chunks);
_col_meta.parent_column_index.resize(number_of_child_chunks);
_col_meta.child_start_row.resize(number_of_child_chunks);
_col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks);
_col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols);
auto child_start_row = cudf::detail::host_2dspan<uint32_t>(
_col_meta.child_start_row.data(), num_of_stripes, num_child_cols);
auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>(
_col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols);
auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>(
_col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols);
int index = 0; // number of child column processed
// For each parent column, update its child column meta for each stripe.
std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) {
const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id];
auto start_row = 0;
auto processed_row_groups = 0;
for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) {
// Aggregate num_rows and start_row from processed parent columns per row groups
if (num_of_rowgroups) {
auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups;
auto processed_child_rows = 0;
for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups;
rowgroup_id++, processed_row_groups++) {
const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows;
rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows;
}
processed_child_rows += child_rows;
}
}
// Aggregate start row, number of rows per chunk and total number of rows in a column
const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
num_child_rows[child_col_idx] += child_rows;
num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows;
// start row could be different for each column when there is nesting at each stripe level
child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row;
}
start_row += child_rows;
}
    // The parent column's null mask and null count are required for the child column
    // to adjust its null mask.
auto type = out_buffers[parent_col_idx].type.id();
auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count());
auto parent_valid_map = out_buffers[parent_col_idx].null_mask();
auto num_rows = out_buffers[parent_col_idx].size;
for (size_type id = 0; id < p_col.num_children; id++) {
const auto child_col_idx = index + id;
_col_meta.parent_column_index[child_col_idx] = parent_col_idx;
if (type == type_id::STRUCT) {
parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count};
// Number of rows in child will remain same as parent in case of struct column
num_child_rows[child_col_idx] = num_rows;
} else {
parent_column_data[child_col_idx] = {nullptr, 0};
}
}
index += p_col.num_children;
});
}
std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; }
std::unique_ptr<column> reader::impl::create_empty_column(const size_type orc_col_id,
column_name_info& schema_info,
rmm::cuda_stream_view stream)
{
schema_info.name = _metadata.column_name(0, orc_col_id);
  // If the column type is orc::DECIMAL, check whether the user wants it converted to float64
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], orc_col_id);
auto const type = to_type_id(
_metadata.get_schema(orc_col_id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64);
int32_t scale = 0;
std::vector<std::unique_ptr<column>> child_columns;
std::unique_ptr<column> out_col = nullptr;
auto kind = _metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("");
out_col = make_lists_column(
0,
make_empty_column(type_id::INT32),
create_empty_column(
_metadata.get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream),
0,
rmm::device_buffer{0, stream},
stream);
break;
case orc::MAP: {
schema_info.children.emplace_back("offsets");
schema_info.children.emplace_back("struct");
const auto child_column_ids = _metadata.get_col_type(orc_col_id).subtypes;
for (size_t idx = 0; idx < _metadata.get_col_type(orc_col_id).subtypes.size(); idx++) {
auto& children_schema = schema_info.children.back().children;
children_schema.emplace_back("");
child_columns.push_back(create_empty_column(
child_column_ids[idx], schema_info.children.back().children.back(), stream));
auto name = get_map_child_col_name(idx);
children_schema[idx].name = name;
}
auto struct_col =
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
out_col = make_lists_column(0,
make_empty_column(type_id::INT32),
std::move(struct_col),
0,
rmm::device_buffer{0, stream},
stream);
} break;
case orc::STRUCT:
for (const auto col : _metadata.get_col_type(orc_col_id).subtypes) {
schema_info.children.emplace_back("");
child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream));
}
out_col =
make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream);
break;
case orc::DECIMAL:
if (type == type_id::DECIMAL64) {
scale = -static_cast<int32_t>(_metadata.get_types()[orc_col_id].scale.value_or(0));
}
out_col = make_empty_column(data_type(type, scale));
break;
default: out_col = make_empty_column(type);
}
return out_col;
}
// Adds child column buffers to parent column
column_buffer&& reader::impl::assemble_buffer(const size_type orc_col_id,
std::vector<std::vector<column_buffer>>& col_buffers,
const size_t level,
rmm::cuda_stream_view stream)
{
auto const col_id = _col_meta.orc_col_map[level][orc_col_id];
auto& col_buffer = col_buffers[level][col_id];
col_buffer.name = _metadata.column_name(0, orc_col_id);
auto kind = _metadata.get_col_type(orc_col_id).kind;
switch (kind) {
case orc::LIST:
case orc::STRUCT:
for (auto const& col : selected_columns.children[orc_col_id]) {
col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
}
break;
case orc::MAP: {
std::vector<column_buffer> child_col_buffers;
// Get child buffers
for (size_t idx = 0; idx < selected_columns.children[orc_col_id].size(); idx++) {
auto name = get_map_child_col_name(idx);
auto col = selected_columns.children[orc_col_id][idx];
child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream));
child_col_buffers.back().name = name;
}
// Create a struct buffer
auto num_rows = child_col_buffers[0].size;
auto struct_buffer =
column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr);
struct_buffer.children = std::move(child_col_buffers);
struct_buffer.name = "struct";
col_buffer.children.emplace_back(std::move(struct_buffer));
} break;
default: break;
}
return std::move(col_buffer);
}
// creates columns along with schema information for each column
void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers,
std::vector<std::unique_ptr<column>>& out_columns,
std::vector<column_name_info>& schema_info,
rmm::cuda_stream_view stream)
{
std::transform(selected_columns.levels[0].begin(),
selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
schema_info.emplace_back("");
auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream);
return make_column(col_buffer, &schema_info.back(), stream, _mr);
});
}
reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
orc_reader_options const& options,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
_sources(std::move(sources)),
_metadata{_sources},
selected_columns{_metadata.select_columns(options.get_columns())}
{
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimal_cols_as_float = options.get_decimal_cols_as_float();
}
timezone_table reader::impl::compute_timezone_table(
const std::vector<cudf::io::orc::metadata::stripe_source_mapping>& selected_stripes,
rmm::cuda_stream_view stream)
{
if (selected_stripes.empty()) return {};
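// The timezone transition table is only needed when at least one selected column is a
// TIMESTAMP; it is built from the writer timezone of the first selected stripe.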
auto const has_timestamp_column = std::any_of(
selected_columns.levels.cbegin(), selected_columns.levels.cend(), [&](auto& col_lvl) {
return std::any_of(col_lvl.cbegin(), col_lvl.cend(), [&](auto& col_meta) {
return _metadata.get_col_type(col_meta.id).kind == TypeKind::TIMESTAMP;
});
});
if (not has_timestamp_column) return {};
return build_timezone_transition_table(selected_stripes[0].stripe_info[0].second->writerTimezone,
stream);
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<std::vector<size_type>>& stripes,
rmm::cuda_stream_view stream)
{
// Selected columns at different levels of nesting are stored in different elements
// of `selected_columns`; thus, num_levels() == 1 means no nested columns
CUDF_EXPECTS(skip_rows == 0 or selected_columns.num_levels() == 1,
"skip_rows is not supported by nested columns");
std::vector<std::unique_ptr<column>> out_columns;
// buffer and stripe data are stored as per nesting level
std::vector<std::vector<column_buffer>> out_buffers(selected_columns.num_levels());
std::vector<column_name_info> schema_info;
std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(selected_columns.num_levels());
std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums;
table_metadata out_metadata;
// There are no columns in the table
if (selected_columns.num_levels() == 0)
return {std::make_unique<table>(), std::move(out_metadata)};
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata.select_stripes(stripes, skip_rows, num_rows);
auto const tz_table = compute_timezone_table(selected_stripes, stream);
// Iterate through levels of nested columns; a child column is one level down
// from its parent column.
for (size_t level = 0; level < selected_columns.num_levels(); level++) {
auto& columns_level = selected_columns.levels[level];
// Association between each ORC column and its cudf::column
_col_meta.orc_col_map.emplace_back(_metadata.get_num_cols(), -1);
std::vector<orc_column_meta> nested_col;
bool is_data_empty = false;
// Get a list of column data types
std::vector<data_type> column_types;
for (auto& col : columns_level) {
// If the column type is orc::DECIMAL see if the user
// desires it to be converted to float64 or not
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], col.id);
auto col_type = to_type_id(
_metadata.get_col_type(col.id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
// Remove this once we support Decimal128 data type
CUDF_EXPECTS(
(col_type != type_id::DECIMAL64) or (_metadata.get_col_type(col.id).precision <= 18),
"Decimal data has precision > 18, Decimal64 data type doesn't support it.");
if (col_type == type_id::DECIMAL64) {
// The sign of the scale is flipped since cuDF follows C++ libraries like CNL,
// which use negative scaling, while liborc and other libraries
// use positive scaling.
auto const scale =
-static_cast<size_type>(_metadata.get_col_type(col.id).scale.value_or(0));
column_types.emplace_back(col_type, scale);
} else {
column_types.emplace_back(col_type);
}
// Map each ORC column to its cudf column
_col_meta.orc_col_map[level][col.id] = column_types.size() - 1;
// TODO: Once MAP type is supported in cuDF, update this for MAP as well
if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col);
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(selected_columns.levels[0].begin(),
selected_columns.levels[0].end(),
std::back_inserter(out_columns),
[&](auto const col_meta) {
schema_info.emplace_back("");
return create_empty_column(col_meta.id, schema_info.back(), stream);
});
break;
} else {
// Get the total number of stripes across all input files.
size_t total_num_stripes =
std::accumulate(selected_stripes.begin(),
selected_stripes.end(),
0,
[](size_t sum, auto& stripe_source_mapping) {
return sum + stripe_source_mapping.stripe_info.size();
});
const auto num_columns = columns_level.size();
cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks(
total_num_stripes, num_columns, stream);
memset(chunks.base_host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Do stripes have row group index
_metadata.is_row_grp_idx_present() &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata.get_row_index_stride() && !(_metadata.get_row_index_stride() & 7) &&
_metadata.get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
null_count_prefix_sums.emplace_back();
null_count_prefix_sums.back().reserve(selected_columns.levels[level].size());
std::generate_n(std::back_inserter(null_count_prefix_sums.back()),
selected_columns.levels[level].size(),
[&]() {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
total_num_stripes, stream);
});
// Tracker for eventually deallocating compressed and uncompressed data
auto& stripe_data = lvl_stripe_data[level];
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
int stripe_idx = 0;
std::vector<std::pair<std::future<size_t>, size_t>> read_tasks;
for (auto const& stripe_source_mapping : selected_stripes) {
// Iterate through the source file's selected stripes
for (auto const& stripe : stripe_source_mapping.stripe_info) {
const auto stripe_info = stripe.first;
const auto stripe_footer = stripe.second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(stripe_idx,
stripe_info,
stripe_footer,
_col_meta.orc_col_map[level],
_metadata.get_types(),
use_index,
&num_dict_entries,
chunks,
stream_info,
level == 0);
if (total_data_size == 0) {
CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data");
// In case ROW GROUP INDEX is not present and all columns are structs with no null
// stream, there is nothing to read at this level.
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; };
CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype),
"Expected streams data within stripe");
is_data_empty = true;
}
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t*>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (not is_data_empty and stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
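// Prefer a direct asynchronous device read when the source supports it for this size;
// otherwise read on the host and copy the bytes to the GPU.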
if (_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->is_device_read_preferred(len)) {
read_tasks.push_back(
std::make_pair(_metadata.per_file_metadata[stripe_source_mapping.source_idx]
.source->device_read_async(offset, len, d_dst, stream),
len));
} else {
const auto buffer =
_metadata.per_file_metadata[stripe_source_mapping.source_idx].source->host_read(
offset, len);
CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read.");
CUDA_TRY(cudaMemcpyAsync(
d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
}
const auto num_rows_per_stripe = stripe_info->numberOfRows;
const auto rowgroup_id = num_rowgroups;
auto stripe_num_rowgroups = 0;
if (use_index) {
stripe_num_rowgroups = (num_rows_per_stripe + _metadata.get_row_index_stride() - 1) /
_metadata.get_row_index_stride();
}
// Update chunks to reference streams pointers
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto& chunk = chunks[stripe_idx][col_idx];
// start row, number of rows in each stripe, and total number of rows
// may change in lower levels of nesting
chunk.start_row = (level == 0)
? stripe_start_row
: _col_meta.child_start_row[stripe_idx * num_columns + col_idx];
chunk.num_rows =
(level == 0)
? stripe_info->numberOfRows
: _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx];
chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx];
chunk.parent_validity_info =
(level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx];
chunk.parent_null_count_prefix_sums =
(level == 0)
? nullptr
: null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]].data();
chunk.encoding_kind = stripe_footer->columns[columns_level[col_idx].id].kind;
chunk.type_kind = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.kind;
// num_child_rows for a struct column is the same as the parent's; for other nested
// types it is calculated.
chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows;
auto const decimal_as_float64 = should_convert_decimal_column_to_float(
_decimal_cols_as_float, _metadata.per_file_metadata[0], columns_level[col_idx].id);
chunk.decimal_scale = _metadata.per_file_metadata[stripe_source_mapping.source_idx]
.ff.types[columns_level[col_idx].id]
.scale.value_or(0) |
(decimal_as_float64 ? orc::gpu::orc_decimal2float64_scale : 0);
chunk.rowgroup_id = rowgroup_id;
chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING)
? sizeof(string_index_pair)
: ((column_types[col_idx].id() == type_id::LIST) or
(column_types[col_idx].id() == type_id::STRUCT))
? sizeof(size_type)
: cudf::size_of(column_types[col_idx]);
chunk.num_rowgroups = stripe_num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.timestamp_type_id = _timestamp_type.id();
}
if (not is_data_empty) {
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += num_rows_per_stripe;
num_rowgroups += stripe_num_rowgroups;
stripe_idx++;
}
}
for (auto& task : read_tasks) {
CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read.");
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
auto row_groups =
cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream);
if (level > 0 and row_groups.size().first) {
cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(),
num_rowgroups * num_columns);
auto& rw_grp_meta = _col_meta.rwgrp_meta;
// Update start row and num rows per row group
std::transform(rw_grp_meta.begin(),
rw_grp_meta.end(),
row_groups_span.begin(),
rw_grp_meta.begin(),
[&](auto meta, auto& row_grp) {
row_grp.num_rows = meta.num_rows;
row_grp.start_row = meta.start_row;
return meta;
});
}
// Setup row group descriptors if using indexes
if (_metadata.per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) {
auto decomp_data =
decompress_stripe_data(chunks,
stripe_data,
_metadata.per_file_metadata[0].decompressor.get(),
stream_info,
total_num_stripes,
row_groups,
_metadata.get_row_index_stride(),
level == 0,
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (row_groups.size().first) {
chunks.host_to_device(stream);
row_groups.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.base_device_ptr(),
nullptr,
chunks.base_device_ptr(),
num_columns,
total_num_stripes,
num_rowgroups,
_metadata.get_row_index_stride(),
level == 0,
stream);
}
}
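// A column is nullable if any of its stripes carries a PRESENT stream.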
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < total_num_stripes; ++j) {
if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
auto is_list_type = (column_types[i].id() == type_id::LIST);
auto n_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[i];
// For a list column, the offset column is always size + 1
if (is_list_type) n_rows++;
out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr);
}
if (not is_data_empty) {
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
tz_table.view(),
row_groups,
_metadata.get_row_index_stride(),
out_buffers[level],
level,
stream);
}
// Extract information to process nested child columns
if (nested_col.size()) {
if (not is_data_empty) {
scan_null_counts(chunks, null_count_prefix_sums[level], stream);
}
row_groups.device_to_host(stream, true);
aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level);
}
// ORC stores the number of elements in each row, so we need to generate offsets from that
if (nested_col.size()) {
std::vector<list_buffer_data> buff_data;
std::for_each(
out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) {
if (out_buffer.type.id() == type_id::LIST) {
auto data = static_cast<size_type*>(out_buffer.data());
buff_data.emplace_back(list_buffer_data{data, out_buffer.size});
}
});
if (buff_data.size()) {
auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream);
generate_offsets_for_list(dev_buff_data, stream);
}
}
}
}
}
// If out_columns is empty, then create columns from buffer.
if (out_columns.empty()) {
create_columns(std::move(out_buffers), out_columns, schema_info, stream);
}
// Return column names (must match order of returned columns)
out_metadata.column_names.reserve(schema_info.size());
std::transform(schema_info.cbegin(),
schema_info.cend(),
std::back_inserter(out_metadata.column_names),
[](auto info) { return info.name; });
out_metadata.schema_info = std::move(schema_info);
for (const auto& meta : _metadata.per_file_metadata) {
for (const auto& kv : meta.ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
orc_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
_impl = std::make_unique<impl>(std::move(sources), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
81b1736f059a2b1cd8776aa73ae08ae6750f2a2b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/abs.h"
#include "core/common_cu.h"
__global__
void AbsKernelForward(const int n, const float * __restrict__ x, float * __restrict__ y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = fabs(x[i]);
}
__global__
void AbsKernelBackward(const int n, const float *x, const float *diff, float *out)
{
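// Gradient of |x| is sign(x): pass the upstream diff through with the sign of x,
// and use 0 at x == 0 (subgradient choice).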
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
out[i] = ((x[i] == 0) ? 0 : (x[i] > 0? diff[i] : - diff[i]));
}
Abs::Abs(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_abs_param() == false) << "param.has_abs_param() == false";
}
void Abs::init() {
_outputs[0]->initValue(_inputs[0]->value()->dims());
_outputs[0]->initDiff();
}
void Abs::forward() {
auto size = _inputs[0]->value()->size();
AbsKernelForward << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _inputs[0]->value()->gpu_data(), (float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void Abs::backward() {
if (_inputs[0]->diff()) {
auto size = _inputs[0]->value()->size();
AbsKernelBackward << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _inputs[0]->value()->gpu_data(), _outputs[0]->diff()->gpu_data(), (float*)_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string Abs::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.square(" + _input_name_for_cpp(0) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
|
81b1736f059a2b1cd8776aa73ae08ae6750f2a2b.cu
|
#include "nodes/abs.h"
#include "core/common_cu.h"
__global__
void AbsKernelForward(const int n, const float * __restrict__ x, float * __restrict__ y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = fabs(x[i]);
}
__global__
void AbsKernelBackward(const int n, const float *x, const float *diff, float *out)
{
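// Gradient of |x| is sign(x): pass the upstream diff through with the sign of x,
// and use 0 at x == 0 (subgradient choice).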
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
out[i] = ((x[i] == 0) ? 0 : (x[i] > 0? diff[i] : - diff[i]));
}
Abs::Abs(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_abs_param() == false) << "param.has_abs_param() == false";
}
void Abs::init() {
_outputs[0]->initValue(_inputs[0]->value()->dims());
_outputs[0]->initDiff();
}
void Abs::forward() {
auto size = _inputs[0]->value()->size();
AbsKernelForward << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _inputs[0]->value()->gpu_data(), (float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void Abs::backward() {
if (_inputs[0]->diff()) {
auto size = _inputs[0]->value()->size();
AbsKernelBackward << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _inputs[0]->value()->gpu_data(), _outputs[0]->diff()->gpu_data(), (float*)_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string Abs::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.square(" + _input_name_for_cpp(0) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
|
14eb621dd256d6e4712c46a7727ee119181d66ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
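// Power stress loop: repeated texture fetches and dependent FP multiplies; writing the
// results back to A and B keeps the compiler from optimizing the loop away.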
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float sum = 0.0;
float mult = 2.5;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
hipMalloc((void**) &device_texture1, N*sizeof(float));
hipMalloc((void**) &device_texture2, N*sizeof(float));
hipMemcpy(device_texture1, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, N*sizeof(float));
hipBindTexture(0, texmem2, device_texture2, N*sizeof(float));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
14eb621dd256d6e4712c46a7727ee119181d66ad.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
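// Power stress loop: repeated texture fetches and dependent FP multiplies; writing the
// results back to A and B keeps the compiler from optimizing the loop away.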
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float sum = 0.0;
float mult = 2.5;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
cudaMalloc((void**) &device_texture1, N*sizeof(float));
cudaMalloc((void**) &device_texture2, N*sizeof(float));
cudaMemcpy(device_texture1, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, N*sizeof(float));
cudaBindTexture(0, texmem2, device_texture2, N*sizeof(float));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
b74fa7092fb10a4604e96ec1c42cee6c1a290a65.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Configuration.h"
#include "BoundingBox.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "Vector3.h"
#include "Ray.h"
#include "BenchmarkTimer.h"
#include "KdTreeGpu.h"
#include "Result.h"
extern BenchmarkTimer gpuTotalTime;
extern BenchmarkTimer gpuTransferTime;
void CUDA_VALIDATE(hipError_t e)
{
if(e != hipSuccess)
{
fprintf(stderr, "Cuda Error: %d\n Exiting.", e);
exit(e);
}
}
__device__ double getField(const Vector3 & vector, Axis a)
{
switch(a)
{
case X:
return vector.x;
case Y:
return vector.y;
}
return vector.z;
}
/*
Returns the position of the ray o+td at point t, moved just slightly in the direction of reflectedDirection
to avoid hitting the same location again
*/
__device__ Vector3 rayPosition(const Vector3 & rayOrigin, const Vector3 & rayDirection, const double t, const Vector3 & reflectedDirection)
{
Vector3 a = rayOrigin;
a.x += t*rayDirection.x + 0.00001*reflectedDirection.x;
a.y += t*rayDirection.y + 0.00001*reflectedDirection.y;
a.z += t*rayDirection.z + 0.00001*reflectedDirection.z;
return a;
}
/*
Reflect function
Reflect this direction along normal, returns reflected vector
*/
__device__ double vectorDot(const Vector3& a, const Vector3& b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
__device__ Vector3 reflectVector(const Vector3& direction, const Vector3 & normal)
{
Vector3 result = direction;
double dotP = vectorDot(direction, normal);
result.x = direction.x - 2 * normal.x * dotP;
result.y = direction.y - 2 * normal.y * dotP;
result.z = direction.z - 2 * normal.z * dotP;
return result;
}
/*
Find triangle intersections
*/
__device__ Vector3 rtIntersectionfindPoint(const Vector3 & a, const Vector3 & b, const double bScale, const Vector3 & c)
{
Vector3 out = a;
out.x = a.x + b.x*bScale - c.x;
out.y = a.y + b.y*bScale - c.y;
out.z = a.z + b.z*bScale - c.z;
return out;
}
__device__ bool rayTriangleIntersection(const Vector3 & rayOrigin, const Vector3 & rayDirection, double tNear, double tFar,
const TrianglePanelPair & triangle, double & tOut, Vector3 & reflection)
{
Triangle tri = triangle.tri;
double d = 1.0/(getField(rayDirection, tri.nAxis) + tri.nu * getField(rayDirection, tri.uAxis) + tri.nv * getField(rayDirection, tri.vAxis));
double t = (tri.nd - (getField(rayOrigin, tri.nAxis) + tri.nu * getField(rayOrigin, tri.uAxis) + tri.nv * getField(rayOrigin, tri.vAxis))) * d;
if(t >= tNear && t <= tFar)
{
Vector3 P = rtIntersectionfindPoint(rayOrigin, rayDirection, t, tri.p1);
double Pu = getField(P, tri.uAxis);
double Pv = getField(P, tri.vAxis);
double beta = Pv * tri.bun + Pu * tri.bvn;
if(beta < 0)
{
return false;
}
double gamma = Pu * tri.cun + Pv * tri.cvn;
if(gamma < 0 || gamma + beta > 1)
{
return false;
}
tOut = t;
reflection = reflectVector(rayDirection, tri.normal_normalized);
return true;
}
return false;
}
__device__ bool rayTrianglesIntersection(const Vector3 & rayOrigin, const Vector3 & rayDirection, double & tNear, double & tFar,
TrianglePanelPair* triangles, int trianglesListFrom, int trianglesListTo,
double & t, int & hitIndex, Vector3 & reflection)
{
int triangleIndexCandidate = -1;
double tMinCandidate = 1E308;
Vector3 reflectionCandidate;
for(int i = trianglesListFrom; i < trianglesListTo; i++)
{
double tempT;
Vector3 tempReflection;
if(rayTriangleIntersection(rayOrigin, rayDirection, tNear, tFar, triangles[i], tempT, tempReflection))
{
if(tempT < tMinCandidate)
{
triangleIndexCandidate = i;
tMinCandidate = tempT;
reflectionCandidate = tempReflection;
}
}
}
if(triangleIndexCandidate >= 0)
{
t = tMinCandidate;
hitIndex = triangleIndexCandidate;
reflection = reflectionCandidate;
return true;
}
return false;
}
/*
Intersects a Bounding Box with a ray. If the ray intersects, then we know that tNear < tFar. If it does not
intersect, then we make sure the opposite is true.
*/
__device__ double2 rayBoxIntersection( BoundingBox sceneBB, const Vector3 & rayOrigin, const Vector3 & rayDirection)
{
double2 tNearFar;
// X
double divx = 1 / rayDirection.x;
double tNear, tFar;
if (divx >= 0)
{
tNear = (sceneBB.mi.x- rayOrigin.x) * divx;
tFar = (sceneBB.ma.x - rayOrigin.x) * divx;
}
else
{
tNear = (sceneBB.ma.x - rayOrigin.x) * divx;
tFar = (sceneBB.mi.x - rayOrigin.x) * divx;
}
if(tFar < tNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
// Y
double divy = 1 / rayDirection.y;
double tyNear, tyFar;
if (divy >= 0)
{
tyNear = (sceneBB.mi.y- rayOrigin.y) * divy;
tyFar = (sceneBB.ma.y - rayOrigin.y) * divy;
}
else {
tyNear = (sceneBB.ma.y - rayOrigin.y) * divy;
tyFar = (sceneBB.mi.y - rayOrigin.y) * divy;
}
if(tyFar < tyNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
if(tyNear > tNear)
{
tNear = tyNear;
}
if(tyFar < tFar)
{
tFar = tyFar;
}
// Z
double divz = 1 / rayDirection.z;
double tzNear, tzFar;
if (divz >= 0)
{
tzNear = (sceneBB.mi.z- rayOrigin.z) * divz;
tzFar = (sceneBB.ma.z - rayOrigin.z) * divz;
}
else {
tzNear = (sceneBB.ma.z - rayOrigin.z) * divz;
tzFar = (sceneBB.mi.z - rayOrigin.z) * divz;
}
if(tzFar < tzNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
if(tzNear > tNear)
{
tNear = tzNear;
}
if(tzFar < tFar)
{
tFar = tzFar;
}
if(tNear > 1E200 || tFar < 0 )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
tNearFar.x = tNear;
tNearFar.y = tFar;
return tNearFar;
}
/*
Entry Kernel
*/
__global__ void kdTreeTraversal(BoundingBox sceneBB, Ray* rays, KdTreeGpuNode* nodes, TrianglePanelPair* triangles,
int firstRayIndex, int numRays, int rank, int rootNode, int* hitPanelOut, GpuResult* results)
{
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < numRays)
{
if(rank > 0 && hitPanelOut[index] == -1)
{
return;
}
hitPanelOut[index] = -1;
Ray myRay = rays[index];
Vector3 rayOrigin = myRay.origin;
Vector3 rayDirection = myRay.direction;
double2 tNearFar = rayBoxIntersection(sceneBB, rayOrigin, rayDirection);
double tNear = tNearFar.x;
double tFar = tNearFar.y;
const double globalTFar = tFar;
int pushdownNode = rootNode;
bool pushDown = true;
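// "Push-down" optimization: remember the deepest node whose subtree still contains the whole
// remaining [tNear, tFar] interval, so traversal can restart from it instead of the root.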
if(tNear < tFar) // Ray intersects box
{
if(tNear < 0)
{
tNear = 0;
}
int currentNode = rootNode;
while(true)
{
while(nodes[currentNode].left != -1)
{
double nodeS = nodes[currentNode].s;
Axis nodeAxis = nodes[currentNode].axis;
double rayOriginAxis = getField(rayOrigin, nodeAxis);
double rayDirectionAxis = getField(rayDirection, nodeAxis);
double tSplit = (nodeS - rayOriginAxis) / rayDirectionAxis;
int nearNode = nodes[currentNode].left, farNode = nodes[currentNode].right;
if(rayDirectionAxis < 0)
{
int temp = nearNode;
nearNode = farNode;
farNode = temp;
}
// Evaluate children nodes
if (tSplit >= tFar) // Near
{
currentNode = nearNode;
}
else if (tSplit <= tNear) // Far
{
currentNode = farNode;
}
else // Near then Far
{
currentNode = nearNode;
tFar = tSplit;
pushDown = false;
}
if(pushDown)
{
pushdownNode = currentNode;
}
}
// Check Triangles for Intersection
int trianglesListFrom = nodes[currentNode].globalListFirst;
int trianglesListSize = nodes[currentNode].globalListSize;
double tClosest;
int hitPanelIndex;
Vector3 reflection;
if(rayTrianglesIntersection(rayOrigin, rayDirection, tNear, tFar,
triangles, trianglesListFrom, trianglesListFrom+trianglesListSize,
tClosest, hitPanelIndex, reflection))
{
if(tClosest >= tNear && tClosest <= tFar )
{
// Check that we have hit on the correct side of closest triangle
if(vectorDot(triangles[hitPanelIndex].tri.normal_normalized, rayDirection) < 0)
{
// Return result hit
Ray reflectedRay;
reflectedRay.origin = rayPosition(rayOrigin, rayDirection, tClosest, reflection);
reflectedRay.direction = reflection;
hitPanelOut[index] = hitPanelIndex;
results[index].reflectedRay = reflectedRay;
results[index].t = tClosest;
results[index].panel = triangles[hitPanelIndex].panel;
rays[index] = reflectedRay;
}
return;
}
}
// Continue Search
if(tFar == globalTFar)
{
return;
}
else
{
currentNode = pushdownNode;
tNear = tFar;
tFar = globalTFar;
}
}
}
}
}
/*
Start tracing a RayList on the GPU. The result can be fetched later, so CPU work can be
done in the meantime.
*/
void KdTreeGpu::traceRaysOnGpuAsync(int firstRayIndex, int numRays, int rank, int buffer)
{
int per_block = 128;
int num_blocks = numRays/per_block + (numRays%per_block==0?0:1);
Ray* rays = &this->deviceRayPtr[firstRayIndex];
int* deviceLocalHitPanelIds = &this->deviceHitPanelIdPtr[firstRayIndex];
hipLaunchKernelGGL(( kdTreeTraversal), dim3(num_blocks), dim3(per_block), 0, 0, sceneBoundingBox, rays, deviceNodesPtr, deviceTrianglesListPtr,
firstRayIndex, numRays, rank, rootNodeIndex,
deviceLocalHitPanelIds, deviceResults);
hipStreamQuery(0);
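// Note: hipStreamQuery(0) only polls the null stream without blocking; it appears to be used
// here to nudge the driver into submitting the queued work before the async copies are enqueued.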
CUDA_VALIDATE(hipMemcpyAsync(resultHitPanelIds[buffer], deviceLocalHitPanelIds, numRays*sizeof(int), hipMemcpyDeviceToHost));
CUDA_VALIDATE(hipMemcpyAsync(results[buffer], deviceResults, numRays*sizeof(GpuResult), hipMemcpyDeviceToHost));
hipStreamQuery(0);
}
void KdTreeGpu::transferRaysToGpu(RayList & rays)
{
gpuTransferTime.start();
CUDA_VALIDATE(hipMalloc(&this->deviceRayPtr, rays.size()*sizeof(Ray)));
CUDA_VALIDATE(hipMalloc(&this->deviceHitPanelIdPtr, rays.size()*sizeof(int)));
CUDA_VALIDATE(hipMemcpyAsync(this->deviceRayPtr, &rays[0], rays.size()*sizeof(Ray), hipMemcpyHostToDevice));
gpuTransferTime.stop();
}
void KdTreeGpu::waitForGpu()
{
CUDA_VALIDATE(hipDeviceSynchronize());
}
/*
Allocating pinned memory on the host that can be accessed asynchronously by the GPU
*/
void KdTreeGpu::allocateHostResultBuffers()
{
unsigned int flag = hipHostMallocPortable;
CUDA_VALIDATE(hipHostMalloc(&resultHitPanelIds[0], MAX_RAYS_PER_ITERATION*sizeof(int), flag));
CUDA_VALIDATE(hipHostMalloc(&resultHitPanelIds[1], MAX_RAYS_PER_ITERATION*sizeof(int), flag));
CUDA_VALIDATE(hipHostMalloc(&results[0], MAX_RAYS_PER_ITERATION*sizeof(GpuResult), flag));
CUDA_VALIDATE(hipHostMalloc(&results[1], MAX_RAYS_PER_ITERATION*sizeof(GpuResult), flag));
}
void KdTreeGpu::freeHostResultBuffers()
{
CUDA_VALIDATE(hipHostFree(resultHitPanelIds[0]));
CUDA_VALIDATE(hipHostFree(resultHitPanelIds[1]));
CUDA_VALIDATE(hipHostFree(results[0]));
CUDA_VALIDATE(hipHostFree(results[1]));
}
/*
Transfer the triangle list and the kd tree nodes to the GPU
*/
void KdTreeGpu::transferNodesToGpu()
{
gpuTransferTime.start();
CUDA_VALIDATE(hipMalloc(&this->deviceTrianglesListPtr, trianglesList.size()*sizeof(TrianglePanelPair)));
CUDA_VALIDATE(hipMemcpyAsync(this->deviceTrianglesListPtr, &this->getTriangle(0), trianglesList.size()*sizeof(TrianglePanelPair), hipMemcpyHostToDevice));
CUDA_VALIDATE(hipMalloc(&this->deviceNodesPtr, nodesList.size()*sizeof(KdTreeGpuNode)));
CUDA_VALIDATE(hipMemcpyAsync(this->deviceNodesPtr, &this->getNode(0), nodesList.size()*sizeof(KdTreeGpuNode), hipMemcpyHostToDevice));
gpuTransferTime.stop();
}
void KdTreeGpu::allocateResultGpuMemory()
{
CUDA_VALIDATE(hipMalloc(&this->deviceResults, MAX_RAYS_PER_ITERATION*sizeof(GpuResult)));
}
void KdTreeGpu::freeGpuMemory()
{
if(this->deviceTrianglesListPtr)
{
CUDA_VALIDATE(hipFree(this->deviceTrianglesListPtr));
}
if(this->deviceNodesPtr)
{
CUDA_VALIDATE(hipFree(this->deviceNodesPtr));
}
if(this->deviceRayPtr)
{
CUDA_VALIDATE(hipFree(this->deviceRayPtr));
}
CUDA_VALIDATE(hipFree(deviceHitPanelIdPtr));
CUDA_VALIDATE(hipFree(deviceResults));
}
void KdTreeGpu::setupCuda()
{
hipSetDeviceFlags(hipDeviceMapHost);
}
|
b74fa7092fb10a4604e96ec1c42cee6c1a290a65.cu
|
#include "Configuration.h"
#include "BoundingBox.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "Vector3.h"
#include "Ray.h"
#include "BenchmarkTimer.h"
#include "KdTreeGpu.h"
#include "Result.h"
extern BenchmarkTimer gpuTotalTime;
extern BenchmarkTimer gpuTransferTime;
void CUDA_VALIDATE(cudaError_t e)
{
if(e != cudaSuccess)
{
fprintf(stderr, "Cuda Error: %d\n Exiting.", e);
exit(e);
}
}
__device__ double getField(const Vector3 & vector, Axis a)
{
switch(a)
{
case X:
return vector.x;
case Y:
return vector.y;
}
return vector.z;
}
/*
Returns the position of the ray o+td at point t, moved just slightly in the direction of reflectedDirection
to avoid hitting the same location again
*/
__device__ Vector3 rayPosition(const Vector3 & rayOrigin, const Vector3 & rayDirection, const double t, const Vector3 & reflectedDirection)
{
Vector3 a = rayOrigin;
a.x += t*rayDirection.x + 0.00001*reflectedDirection.x;
a.y += t*rayDirection.y + 0.00001*reflectedDirection.y;
a.z += t*rayDirection.z + 0.00001*reflectedDirection.z;
return a;
}
/*
Reflect function
Reflect this direction along normal, returns reflected vector
*/
__device__ double vectorDot(const Vector3& a, const Vector3& b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
__device__ Vector3 reflectVector(const Vector3& direction, const Vector3 & normal)
{
Vector3 result = direction;
double dotP = vectorDot(direction, normal);
result.x = direction.x - 2 * normal.x * dotP;
result.y = direction.y - 2 * normal.y * dotP;
result.z = direction.z - 2 * normal.z * dotP;
return result;
}
/*
Find triangle intersections
*/
__device__ Vector3 rtIntersectionfindPoint(const Vector3 & a, const Vector3 & b, const double bScale, const Vector3 & c)
{
Vector3 out = a;
out.x = a.x + b.x*bScale - c.x;
out.y = a.y + b.y*bScale - c.y;
out.z = a.z + b.z*bScale - c.z;
return out;
}
__device__ bool rayTriangleIntersection(const Vector3 & rayOrigin, const Vector3 & rayDirection, double tNear, double tFar,
const TrianglePanelPair & triangle, double & tOut, Vector3 & reflection)
{
Triangle tri = triangle.tri;
double d = 1.0/(getField(rayDirection, tri.nAxis) + tri.nu * getField(rayDirection, tri.uAxis) + tri.nv * getField(rayDirection, tri.vAxis));
double t = (tri.nd - (getField(rayOrigin, tri.nAxis) + tri.nu * getField(rayOrigin, tri.uAxis) + tri.nv * getField(rayOrigin, tri.vAxis))) * d;
if(t >= tNear && t <= tFar)
{
Vector3 P = rtIntersectionfindPoint(rayOrigin, rayDirection, t, tri.p1);
double Pu = getField(P, tri.uAxis);
double Pv = getField(P, tri.vAxis);
double beta = Pv * tri.bun + Pu * tri.bvn;
if(beta < 0)
{
return false;
}
double gamma = Pu * tri.cun + Pv * tri.cvn;
if(gamma < 0 || gamma + beta > 1)
{
return false;
}
tOut = t;
reflection = reflectVector(rayDirection, tri.normal_normalized);
return true;
}
return false;
}
__device__ bool rayTrianglesIntersection(const Vector3 & rayOrigin, const Vector3 & rayDirection, double & tNear, double & tFar,
TrianglePanelPair* triangles, int trianglesListFrom, int trianglesListTo,
double & t, int & hitIndex, Vector3 & reflection)
{
int triangleIndexCandidate = -1;
double tMinCandidate = 1E308;
Vector3 reflectionCandidate;
for(int i = trianglesListFrom; i < trianglesListTo; i++)
{
double tempT;
Vector3 tempReflection;
if(rayTriangleIntersection(rayOrigin, rayDirection, tNear, tFar, triangles[i], tempT, tempReflection))
{
if(tempT < tMinCandidate)
{
triangleIndexCandidate = i;
tMinCandidate = tempT;
reflectionCandidate = tempReflection;
}
}
}
if(triangleIndexCandidate >= 0)
{
t = tMinCandidate;
hitIndex = triangleIndexCandidate;
reflection = reflectionCandidate;
return true;
}
return false;
}
/*
Intersects a Bounding Box with a ray. If the ray intersects, then we know that tNear < tFar. If it does not
intersect, then we make sure the opposite is true.
*/
__device__ double2 rayBoxIntersection( BoundingBox sceneBB, const Vector3 & rayOrigin, const Vector3 & rayDirection)
{
double2 tNearFar;
// X
double divx = 1 / rayDirection.x;
double tNear, tFar;
if (divx >= 0)
{
tNear = (sceneBB.mi.x- rayOrigin.x) * divx;
tFar = (sceneBB.ma.x - rayOrigin.x) * divx;
}
else
{
tNear = (sceneBB.ma.x - rayOrigin.x) * divx;
tFar = (sceneBB.mi.x - rayOrigin.x) * divx;
}
if(tFar < tNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
// Y
double divy = 1 / rayDirection.y;
double tyNear, tyFar;
if (divy >= 0)
{
tyNear = (sceneBB.mi.y- rayOrigin.y) * divy;
tyFar = (sceneBB.ma.y - rayOrigin.y) * divy;
}
else {
tyNear = (sceneBB.ma.y - rayOrigin.y) * divy;
tyFar = (sceneBB.mi.y - rayOrigin.y) * divy;
}
if(tyFar < tyNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
if(tyNear > tNear)
{
tNear = tyNear;
}
if(tyFar < tFar)
{
tFar = tyFar;
}
// Z
double divz = 1 / rayDirection.z;
double tzNear, tzFar;
if (divz >= 0)
{
tzNear = (sceneBB.mi.z- rayOrigin.z) * divz;
tzFar = (sceneBB.ma.z - rayOrigin.z) * divz;
}
else {
tzNear = (sceneBB.ma.z - rayOrigin.z) * divz;
tzFar = (sceneBB.mi.z - rayOrigin.z) * divz;
}
if(tzFar < tzNear )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
if(tzNear > tNear)
{
tNear = tzNear;
}
if(tzFar < tFar)
{
tFar = tzFar;
}
if(tNear > 1E200 || tFar < 0 )
{
tNearFar.x = 1;
tNearFar.y = 0;
return tNearFar;
}
tNearFar.x = tNear;
tNearFar.y = tFar;
return tNearFar;
}
/*
Entry Kernel
*/
__global__ void kdTreeTraversal(BoundingBox sceneBB, Ray* rays, KdTreeGpuNode* nodes, TrianglePanelPair* triangles,
int firstRayIndex, int numRays, int rank, int rootNode, int* hitPanelOut, GpuResult* results)
{
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < numRays)
{
if(rank > 0 && hitPanelOut[index] == -1)
{
return;
}
hitPanelOut[index] = -1;
Ray myRay = rays[index];
Vector3 rayOrigin = myRay.origin;
Vector3 rayDirection = myRay.direction;
double2 tNearFar = rayBoxIntersection(sceneBB, rayOrigin, rayDirection);
double tNear = tNearFar.x;
double tFar = tNearFar.y;
const double globalTFar = tFar;
int pushdownNode = rootNode;
bool pushDown = true;
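// "Push-down" optimization: remember the deepest node whose subtree still contains the whole
// remaining [tNear, tFar] interval, so traversal can restart from it instead of the root.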
if(tNear < tFar) // Ray intersects box
{
if(tNear < 0)
{
tNear = 0;
}
int currentNode = rootNode;
while(true)
{
while(nodes[currentNode].left != -1)
{
double nodeS = nodes[currentNode].s;
Axis nodeAxis = nodes[currentNode].axis;
double rayOriginAxis = getField(rayOrigin, nodeAxis);
double rayDirectionAxis = getField(rayDirection, nodeAxis);
double tSplit = (nodeS - rayOriginAxis) / rayDirectionAxis;
int nearNode = nodes[currentNode].left, farNode = nodes[currentNode].right;
if(rayDirectionAxis < 0)
{
int temp = nearNode;
nearNode = farNode;
farNode = temp;
}
// Evaluate children nodes
if (tSplit >= tFar) // Near
{
currentNode = nearNode;
}
else if (tSplit <= tNear) // Far
{
currentNode = farNode;
}
else // Near then Far
{
currentNode = nearNode;
tFar = tSplit;
pushDown = false;
}
if(pushDown)
{
pushdownNode = currentNode;
}
}
// Check Triangles for Intersection
int trianglesListFrom = nodes[currentNode].globalListFirst;
int trianglesListSize = nodes[currentNode].globalListSize;
double tClosest;
int hitPanelIndex;
Vector3 reflection;
if(rayTrianglesIntersection(rayOrigin, rayDirection, tNear, tFar,
triangles, trianglesListFrom, trianglesListFrom+trianglesListSize,
tClosest, hitPanelIndex, reflection))
{
if(tClosest >= tNear && tClosest <= tFar )
{
// Check that we have hit on the correct side of closest triangle
if(vectorDot(triangles[hitPanelIndex].tri.normal_normalized, rayDirection) < 0)
{
// Return result hit
Ray reflectedRay;
reflectedRay.origin = rayPosition(rayOrigin, rayDirection, tClosest, reflection);
reflectedRay.direction = reflection;
hitPanelOut[index] = hitPanelIndex;
results[index].reflectedRay = reflectedRay;
results[index].t = tClosest;
results[index].panel = triangles[hitPanelIndex].panel;
rays[index] = reflectedRay;
}
return;
}
}
// Continue Search
if(tFar == globalTFar)
{
return;
}
else
{
currentNode = pushdownNode;
tNear = tFar;
tFar = globalTFar;
}
}
}
}
}
/*
Start tracing a RayList on the GPU. The result can be fetched later, so CPU work can be
done in the meantime.
*/
void KdTreeGpu::traceRaysOnGpuAsync(int firstRayIndex, int numRays, int rank, int buffer)
{
int per_block = 128;
int num_blocks = numRays/per_block + (numRays%per_block==0?0:1);
Ray* rays = &this->deviceRayPtr[firstRayIndex];
int* deviceLocalHitPanelIds = &this->deviceHitPanelIdPtr[firstRayIndex];
kdTreeTraversal<<<num_blocks, per_block, 0>>>(sceneBoundingBox, rays, deviceNodesPtr, deviceTrianglesListPtr,
firstRayIndex, numRays, rank, rootNodeIndex,
deviceLocalHitPanelIds, deviceResults);
cudaStreamQuery(0);
CUDA_VALIDATE(cudaMemcpyAsync(resultHitPanelIds[buffer], deviceLocalHitPanelIds, numRays*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_VALIDATE(cudaMemcpyAsync(results[buffer], deviceResults, numRays*sizeof(GpuResult), cudaMemcpyDeviceToHost));
cudaStreamQuery(0);
}
void KdTreeGpu::transferRaysToGpu(RayList & rays)
{
gpuTransferTime.start();
CUDA_VALIDATE(cudaMalloc(&this->deviceRayPtr, rays.size()*sizeof(Ray)));
CUDA_VALIDATE(cudaMalloc(&this->deviceHitPanelIdPtr, rays.size()*sizeof(int)));
CUDA_VALIDATE(cudaMemcpyAsync(this->deviceRayPtr, &rays[0], rays.size()*sizeof(Ray), cudaMemcpyHostToDevice));
gpuTransferTime.stop();
}
void KdTreeGpu::waitForGpu()
{
CUDA_VALIDATE(cudaDeviceSynchronize());
}
/*
Allocating pinned memory on the host that can be accessed asynchronously by the GPU
*/
void KdTreeGpu::allocateHostResultBuffers()
{
unsigned int flag = cudaHostAllocPortable;
CUDA_VALIDATE(cudaHostAlloc(&resultHitPanelIds[0], MAX_RAYS_PER_ITERATION*sizeof(int), flag));
CUDA_VALIDATE(cudaHostAlloc(&resultHitPanelIds[1], MAX_RAYS_PER_ITERATION*sizeof(int), flag));
CUDA_VALIDATE(cudaHostAlloc(&results[0], MAX_RAYS_PER_ITERATION*sizeof(GpuResult), flag));
CUDA_VALIDATE(cudaHostAlloc(&results[1], MAX_RAYS_PER_ITERATION*sizeof(GpuResult), flag));
}
void KdTreeGpu::freeHostResultBuffers()
{
CUDA_VALIDATE(cudaFreeHost(resultHitPanelIds[0]));
CUDA_VALIDATE(cudaFreeHost(resultHitPanelIds[1]));
CUDA_VALIDATE(cudaFreeHost(results[0]));
CUDA_VALIDATE(cudaFreeHost(results[1]));
}
/*
Transfer the triangle list and the kd tree nodes to the GPU
*/
void KdTreeGpu::transferNodesToGpu()
{
gpuTransferTime.start();
CUDA_VALIDATE(cudaMalloc(&this->deviceTrianglesListPtr, trianglesList.size()*sizeof(TrianglePanelPair)));
CUDA_VALIDATE(cudaMemcpyAsync(this->deviceTrianglesListPtr, &this->getTriangle(0), trianglesList.size()*sizeof(TrianglePanelPair), cudaMemcpyHostToDevice));
CUDA_VALIDATE(cudaMalloc(&this->deviceNodesPtr, nodesList.size()*sizeof(KdTreeGpuNode)));
CUDA_VALIDATE(cudaMemcpyAsync(this->deviceNodesPtr, &this->getNode(0), nodesList.size()*sizeof(KdTreeGpuNode), cudaMemcpyHostToDevice));
gpuTransferTime.stop();
}
void KdTreeGpu::allocateResultGpuMemory()
{
CUDA_VALIDATE(cudaMalloc(&this->deviceResults, MAX_RAYS_PER_ITERATION*sizeof(GpuResult)));
}
void KdTreeGpu::freeGpuMemory()
{
if(this->deviceTrianglesListPtr)
{
CUDA_VALIDATE(cudaFree(this->deviceTrianglesListPtr));
}
if(this->deviceNodesPtr)
{
CUDA_VALIDATE(cudaFree(this->deviceNodesPtr));
}
if(this->deviceRayPtr)
{
CUDA_VALIDATE(cudaFree(this->deviceRayPtr));
}
CUDA_VALIDATE(cudaFree(deviceHitPanelIdPtr));
CUDA_VALIDATE(cudaFree(deviceResults));
}
void KdTreeGpu::setupCuda()
{
cudaSetDeviceFlags(cudaDeviceMapHost);
}
|
561993ba30d0645f3add8c0e8d5531fac99e312b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Matrix multiplication using floats
//Anderson Alberto Ochoa Estupiñan
//Code: 1053823121
#include<stdio.h>
#include<iostream>
#include<cstdlib>
#include<time.h>
#include<cuda.h>
#define TILE_WIDTH 32
using namespace std;
//=====================================================================================
//Function to print matrices
void print(float *A, int n, int m)
{
for (int i=0; i<n; i++)
{
for (int j=0; j<m; j++)
{
cout<<A[m*i+j]<<" | ";
}
cout<<endl;
}
}
//=====================================================================================
//Function used just to fill the given matrix with a given value
void fillMatrix (float *mat, float value, int n, int m)
{
int size=n*m;
for (int i=0; i<size; i++)
{
mat[i] = value;
}
}
//=====================================================================================
//sequential
//Function used to multiply both matrices taking each matrix as a vector
void multMatrixsequential (float *h_matA, float *h_matB, float *h_matC, int n, int m, int o)
{
//Row*Width+Col to find the value in the given bidimensional index
for (int i=0; i<n; i++)
{
for (int j=0; j<o; j++)
{
float sum=0;
for (int k=0; k<m; k++)
{
sum += h_matA[m*i+k]*h_matB[o*k+j];
}
h_matC[o*i+j] = sum;
//cout<<h_matC[n*i+j]<<" | ";
}
//cout<<endl;
}
}
//=====================================================================================
//Parallel
//The multiplication kernel without tiles
__global__ void matrixMultKernel (float *d_matA, float *d_matB, float *d_matC, int n, int m, int o)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row<n)&&(Col<o))
{
float temp=0;
for (int i=0; i<m; i++)
{
temp += d_matA[Row*m+i]*d_matB[i*o+Col];
}
d_matC[Row*o+Col] = temp;
}
}
//=====================================================================================
//the multiplication kernel with tiles
__global__ void matrixMulKernelTiled(float *d_matA, float *d_matB, float *d_matC, int n, int m, int o){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int k = 0; k < (m+TILE_WIDTH-1)/(TILE_WIDTH); ++k)
{
if (k*TILE_WIDTH + tx < m && row < n)
{
Mds[ty][tx] = d_matA[row * m + k*TILE_WIDTH + tx];
} else
{
Mds[ty][tx] = 0;
}
if (k*TILE_WIDTH + ty < m && col < o)
{
Nds[ty][tx] = d_matB[(k*TILE_WIDTH + ty) * o + col];
} else
{
Nds[ty][tx] =0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < n && col < o)
{
d_matC[row * o + col] = Pvalue;
}
}
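/*
   Worked example (added for clarity, not part of the original source): with the sizes used in
   main() -- n=2, m=4, o=8 and TILE_WIDTH=32 -- the tile loop runs (m+TILE_WIDTH-1)/TILE_WIDTH = 1
   time. Threads whose row/column or k*TILE_WIDTH offset falls outside the matrices load 0 into
   shared memory, so the partial sums over the full 32-wide tile remain correct at the boundaries.
*/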
//=====================================================================================
//Function to call the kernel of the tiled multiplication
void multMatrixParallelTiled(float *A, float *B, float *C, int n, int m, int o)
{
float blockSize = TILE_WIDTH;
float *d_matA, *d_matB, *d_matC;
//1. Allocate memory for d_matA, etc. on the device (hipMalloc)
hipMalloc(&d_matA, n * m * sizeof(float));
hipMalloc(&d_matB, m * o * sizeof(float));
hipMalloc(&d_matC, n * o * sizeof(float));
//2. Copy Data from host to d_matA, etc. (hipMemcpy)
hipMemcpy(d_matA, A, n * m * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_matB, B, m * o * sizeof(float), hipMemcpyHostToDevice);
dim3 threads(blockSize,blockSize,1); //How many threads you want per block -- you have to respect the GPU's capacity
dim3 blocks(ceil(o/blockSize),ceil(n/blockSize),1);//How many blocks you want in each direction --
//The GPU used in this course is capable of having 1024 threads per block
//3. Kernel Launch Code
hipLaunchKernelGGL(( matrixMulKernelTiled), dim3(blocks),dim3(threads), 0, 0, d_matA,d_matB,d_matC,n,m,o);
hipMemcpy (C, d_matC, n * o * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
}
//=====================================================================================
//Function to call the tile less multiplication kernel
void multMatrixParallel(float *A, float *B, float *C, int n, int m, int o)
{
float blockSize = TILE_WIDTH;
float *d_matA, *d_matB, *d_matC;
//1. Allocate memory for d_matA, etc. on the device (hipMalloc)
hipMalloc(&d_matA, n * m * sizeof(float));
hipMalloc(&d_matB, m * o * sizeof(float));
hipMalloc(&d_matC, n * o * sizeof(float));
//2. Copy Data from host to d_matA, etc. (hipMemcpy)
hipMemcpy(d_matA, A, n * m * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_matB, B, m * o * sizeof(float), hipMemcpyHostToDevice);
dim3 threads(blockSize,blockSize,1); //How many threads you want per block -- you have to respect the GPU's capacity
dim3 blocks(ceil(o/blockSize),ceil(n/blockSize),1);//How many blocks you want in each direction --
//The GPU used in this course is capable of having 1024 threads per block
//3. Kernel Launch Code
hipLaunchKernelGGL(( matrixMultKernel), dim3(blocks),dim3(threads), 0, 0, d_matA,d_matB,d_matC,n,m,o);
hipMemcpy (C, d_matC, n * o * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
}
//=====================================================================================
//Function used to compare the results
int compareMatrix (float *A, float *B,int n, int m)
{
int size=n*m;
for (int i=0; i<size; i++ )
{
if (A[i]!=B[i])
{
cout<<"## sequential and Parallel results are NOT equal ##"<<endl;
return 0;
}
}
cout<<"== sequential and Parallel results are equal =="<<endl;
return 0;
}
//========================================== MAIN =====================================
int main()
{
clock_t start, finish;
double elapsedsequential,elapsedParallel,elapsedParallelTiles,optimizationP,optimizationT;
int n=2;
int m=4;
int o=8;
float *matA = (float *) malloc(n * m * sizeof(float));
float *matB = (float *) malloc(m * o * sizeof(float));
float *matCS = (float *) malloc(n * o * sizeof(float));
float *matCP = (float *) malloc(n * o * sizeof(float));
float *matCPT = (float *) malloc(n * o * sizeof(float));
fillMatrix(matA,1.5,n,m);
fillMatrix(matB,1.5,m,o);
fillMatrix(matCS,0,n,o);
fillMatrix(matCP,0,n,o);
fillMatrix(matCPT,0,n,o);
start = clock();
multMatrixsequential(matA,matB,matCS,n,m,o);
finish = clock();
elapsedsequential = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The sequential process took: " << elapsedsequential << " seconds to execute "<< endl<< endl;
start = clock();
multMatrixParallel(matA,matB,matCP,n,m,o);
finish = clock();
elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl<< endl;
start = clock();
multMatrixParallelTiled(matA,matB,matCPT,n,m,o);
finish = clock();
elapsedParallelTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process using Tiles took: " << elapsedParallelTiles << " seconds to execute "<< endl<< endl;
optimizationP = elapsedsequential/elapsedParallel;
cout<< "The acceleration we've got without using Tiles: " << optimizationP << "X" <<endl;
optimizationT = elapsedsequential/elapsedParallelTiles;
cout<< "The acceleration we've got using Tiles: " << optimizationT << "X" <<endl;
cout<< "Comparing Serial vs Parallel result " <<endl;
compareMatrix(matCS,matCP,n,o);
cout<< "Comparing Serial vs Parallel with Tiles result " <<endl;
compareMatrix(matCS,matCPT,n,o);
//For debugging purposes only
//print(matCS,n,o);
//cout<<endl;
//print(matCP,n,o);
//cout<<endl;
//print(matCPT,n,o);
free (matA);
free (matB);
free (matCS);
free (matCP);
free (matCPT);
return 0;
}
|
561993ba30d0645f3add8c0e8d5531fac99e312b.cu
|
//Matrix multiplication using floats
//Anderson Alberto Ochoa Estupiñan
//Code: 1053823121
#include<stdio.h>
#include<iostream>
#include<cstdlib>
#include<time.h>
#include<cuda.h>
#define TILE_WIDTH 32
using namespace std;
//=====================================================================================
//Function to print matrices
void print(float *A, int n, int m)
{
for (int i=0; i<n; i++)
{
for (int j=0; j<m; j++)
{
cout<<A[m*i+j]<<" | ";
}
cout<<endl;
}
}
//=====================================================================================
//Function used just to fill the given matrix with a given value
void fillMatrix (float *mat, float value, int n, int m)
{
int size=n*m;
for (int i=0; i<size; i++)
{
mat[i] = value;
}
}
//=====================================================================================
//sequential
//Function used to multiply both matrices taking each matrix as a vector
void multMatrixsequential (float *h_matA, float *h_matB, float *h_matC, int n, int m, int o)
{
//Row*Width+Col to find the value in the given bidimensional index
for (int i=0; i<n; i++)
{
for (int j=0; j<o; j++)
{
float sum=0;
for (int k=0; k<m; k++)
{
sum += h_matA[m*i+k]*h_matB[o*k+j];
}
h_matC[o*i+j] = sum;
//cout<<h_matC[n*i+j]<<" | ";
}
//cout<<endl;
}
}
//=====================================================================================
//Parallel
//The multiplication kernel without tiles
__global__ void matrixMultKernel (float *d_matA, float *d_matB, float *d_matC, int n, int m, int o)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row<n)&&(Col<o))
{
float temp=0;
for (int i=0; i<m; i++)
{
temp += d_matA[Row*m+i]*d_matB[i*o+Col];
}
d_matC[Row*o+Col] = temp;
}
}
//=====================================================================================
//the multiplication kernel with tiles
__global__ void matrixMulKernelTiled(float *d_matA, float *d_matB, float *d_matC, int n, int m, int o){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int k = 0; k < (m+TILE_WIDTH-1)/(TILE_WIDTH); ++k)
{
if (k*TILE_WIDTH + tx < m && row < n)
{
Mds[ty][tx] = d_matA[row * m + k*TILE_WIDTH + tx];
} else
{
Mds[ty][tx] = 0;
}
if (k*TILE_WIDTH + ty < m && col < o)
{
Nds[ty][tx] = d_matB[(k*TILE_WIDTH + ty) * o + col];
} else
{
Nds[ty][tx] =0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < n && col < o)
{
d_matC[row * o + col] = Pvalue;
}
}
//=====================================================================================
//Function to call the kernel of the tiled multiplication
void multMatrixParallelTiled(float *A, float *B, float *C, int n, int m, int o)
{
float blockSize = TILE_WIDTH;
float *d_matA, *d_matB, *d_matC;
//1. Allocate memory for d_matA, etc. on the device (cudaMalloc)
cudaMalloc(&d_matA, n * m * sizeof(float));
cudaMalloc(&d_matB, m * o * sizeof(float));
cudaMalloc(&d_matC, n * o * sizeof(float));
//2. Copy Data from host to d_matA, etc. (cudaMemcpy)
cudaMemcpy(d_matA, A, n * m * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_matB, B, m * o * sizeof(float), cudaMemcpyHostToDevice);
dim3 threads(blockSize,blockSize,1); //How many threads you want per block -- you have to respect the GPU's capacity
dim3 blocks(ceil(o/blockSize),ceil(n/blockSize),1);//How many blocks you want in each direction --
//The GPU used in this course is capable of having 1024 threads per block
//3. Kernel Launch Code
matrixMulKernelTiled<<<blocks,threads>>>(d_matA,d_matB,d_matC,n,m,o);
cudaMemcpy (C, d_matC, n * o * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
}
//=====================================================================================
//Function to call the tile less multiplication kernel
void multMatrixParallel(float *A, float *B, float *C, int n, int m, int o)
{
float blockSize = TILE_WIDTH;
float *d_matA, *d_matB, *d_matC;
//1. Allocate memory for d_matA, etc. on the device (cudaMalloc)
cudaMalloc(&d_matA, n * m * sizeof(float));
cudaMalloc(&d_matB, m * o * sizeof(float));
cudaMalloc(&d_matC, n * o * sizeof(float));
//2. Copy Data from host to d_matA, etc. (cudaMemcpy)
cudaMemcpy(d_matA, A, n * m * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_matB, B, m * o * sizeof(float), cudaMemcpyHostToDevice);
dim3 threads(blockSize,blockSize,1); //How many threads you want per block -- you have to respect the GPU's capacity
dim3 blocks(ceil(o/blockSize),ceil(n/blockSize),1);//How many blocks you want in each direction --
//The GPU used in this course is capable of having 1024 threads per block
//3. Kernel Launch Code
matrixMultKernel<<<blocks,threads>>>(d_matA,d_matB,d_matC,n,m,o);
cudaMemcpy (C, d_matC, n * o * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
}
//=====================================================================================
//Function used to compare the results
int compareMatrix (float *A, float *B,int n, int m)
{
int size=n*m;
for (int i=0; i<size; i++ )
{
if (A[i]!=B[i])
{
cout<<"## sequential and Parallel results are NOT equal ##"<<endl;
return 0;
}
}
cout<<"== sequential and Parallel results are equal =="<<endl;
return 0;
}
//========================================== MAIN =====================================
int main()
{
clock_t start, finish;
double elapsedsequential,elapsedParallel,elapsedParallelTiles,optimizationP,optimizationT;
int n=2;
int m=4;
int o=8;
float *matA = (float *) malloc(n * m * sizeof(float));
float *matB = (float *) malloc(m * o * sizeof(float));
float *matCS = (float *) malloc(n * o * sizeof(float));
float *matCP = (float *) malloc(n * o * sizeof(float));
float *matCPT = (float *) malloc(n * o * sizeof(float));
fillMatrix(matA,1.5,n,m);
fillMatrix(matB,1.5,m,o);
fillMatrix(matCS,0,n,o);
fillMatrix(matCP,0,n,o);
fillMatrix(matCPT,0,n,o);
start = clock();
multMatrixsequential(matA,matB,matCS,n,m,o);
finish = clock();
elapsedsequential = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The sequential process took: " << elapsedsequential << " seconds to execute "<< endl<< endl;
start = clock();
multMatrixParallel(matA,matB,matCP,n,m,o);
finish = clock();
elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl<< endl;
start = clock();
multMatrixParallelTiled(matA,matB,matCPT,n,m,o);
finish = clock();
elapsedParallelTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process using Tiles took: " << elapsedParallelTiles << " seconds to execute "<< endl<< endl;
optimizationP = elapsedsequential/elapsedParallel;
cout<< "The acceleration we've got without using Tiles: " << optimizationP << "X" <<endl;
optimizationT = elapsedsequential/elapsedParallelTiles;
cout<< "The acceleration we've got using Tiles: " << optimizationT << "X" <<endl;
cout<< "Comparing Serial vs Parallel result " <<endl;
compareMatrix(matCS,matCP,n,o);
cout<< "Comparing Serial vs Parallel with Tiles result " <<endl;
compareMatrix(matCS,matCPT,n,o);
//For debugging purposes only
//print(matCS,n,o);
//cout<<endl;
//print(matCP,n,o);
//cout<<endl;
//print(matCPT,n,o);
free (matA);
free (matB);
free (matCS);
free (matCP);
free (matCPT);
return 0;
}
|
30f63cd03653b920bd67f22069cf4d76e4b9fdb6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "chronoGPU.hpp"
#include "common.hpp"
#include <iostream>
ChronoGPU::ChronoGPU()
: m_started( false ) {
HANDLE_ERROR( hipEventCreate( &m_start ) );
HANDLE_ERROR( hipEventCreate( &m_end ) );
}
ChronoGPU::~ChronoGPU() {
if ( m_started ) {
stop();
std::cerr << "ChronoGPU::~ChronoGPU(): chrono wasn't turned off!" << std::endl;
}
HANDLE_ERROR( hipEventDestroy( m_start ) );
HANDLE_ERROR( hipEventDestroy( m_end ) );
}
void ChronoGPU::start() {
if ( !m_started ) {
HANDLE_ERROR( hipEventRecord( m_start, 0 ) );
m_started = true;
}
else
std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl;
}
void ChronoGPU::stop() {
if ( m_started ) {
HANDLE_ERROR( hipEventRecord( m_end, 0 ) );
HANDLE_ERROR( hipEventSynchronize( m_end ) );
m_started = false;
}
else
std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl;
}
float ChronoGPU::elapsedTime() {
float time = 0.f;
HANDLE_ERROR( hipEventElapsedTime( &time, m_start, m_end ) );
return time;
}
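/*
   Illustrative usage sketch (added for clarity, not part of the original source); kernelName and
   its launch configuration are placeholders:

       ChronoGPU chrono;
       chrono.start();                         // records the start event on stream 0
       hipLaunchKernelGGL(kernelName, grid, block, 0, 0, args...);
       chrono.stop();                          // records the end event and synchronizes on it
       float ms = chrono.elapsedTime();        // elapsed GPU time in milliseconds
*/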
|
30f63cd03653b920bd67f22069cf4d76e4b9fdb6.cu
|
#include "chronoGPU.hpp"
#include "common.hpp"
#include <iostream>
ChronoGPU::ChronoGPU()
: m_started( false ) {
HANDLE_ERROR( cudaEventCreate( &m_start ) );
HANDLE_ERROR( cudaEventCreate( &m_end ) );
}
ChronoGPU::~ChronoGPU() {
if ( m_started ) {
stop();
std::cerr << "ChronoGPU::~ChronoGPU(): chrono wasn't turned off!" << std::endl;
}
HANDLE_ERROR( cudaEventDestroy( m_start ) );
HANDLE_ERROR( cudaEventDestroy( m_end ) );
}
void ChronoGPU::start() {
if ( !m_started ) {
HANDLE_ERROR( cudaEventRecord( m_start, 0 ) );
m_started = true;
}
else
std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl;
}
void ChronoGPU::stop() {
if ( m_started ) {
HANDLE_ERROR( cudaEventRecord( m_end, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( m_end ) );
m_started = false;
}
else
std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl;
}
float ChronoGPU::elapsedTime() {
float time = 0.f;
HANDLE_ERROR( cudaEventElapsedTime( &time, m_start, m_end ) );
return time;
}
|
f67bd806c9603c1c7b6505b97910233604c4746c.hip
|
// !!! This is a file automatically generated by hipify!!!
#if 0
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
#endif
|
f67bd806c9603c1c7b6505b97910233604c4746c.cu
|
#if 0
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
#endif
|
e52836c6ebb82090bf9d90fb468eb091cd15ba74.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/predict.h"
#include "cuda_utils.h"
#include "common.h"
#include "dogm_types.h"
#include <thrust/random.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__global__ void predictKernel(Particle* particle_array, int grid_size, float p_S, const glm::mat4x4 transition_matrix,
float process_noise_position, float process_noise_velocity, int particle_count)
{
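    // Grid-stride loop: each thread starts at its global index and advances by the total number
    // of launched threads, so every particle is covered regardless of the launch configuration.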
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
thrust::default_random_engine rng;
rng.discard(i);
thrust::normal_distribution<float> dist_noise_pos(0.0f, process_noise_position);
thrust::normal_distribution<float> dist_noise_vel(0.0f, process_noise_velocity);
glm::vec4 process_noise(dist_noise_pos(rng), dist_noise_pos(rng), dist_noise_vel(rng), dist_noise_vel(rng));
particle_array[i].state = transition_matrix * particle_array[i].state + process_noise;
particle_array[i].weight = p_S * particle_array[i].weight;
float x = particle_array[i].state[0];
float y = particle_array[i].state[1];
if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
{
thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size);
thrust::normal_distribution<float> dist_vel(0.0f, 12.0);
const int index = dist_idx(rng);
x = index % grid_size;
y = index / grid_size;
particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
}
int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
particle_array[i].grid_cell_idx = pos_x + grid_size * pos_y;
//printf("X: %d, Y: %d, Cell index: %d\n", pos_x, pos_y, (pos_x + grid_size * pos_y));
}
}
|
e52836c6ebb82090bf9d90fb468eb091cd15ba74.cu
|
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/predict.h"
#include "cuda_utils.h"
#include "common.h"
#include "dogm_types.h"
#include <thrust/random.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__global__ void predictKernel(Particle* particle_array, int grid_size, float p_S, const glm::mat4x4 transition_matrix,
float process_noise_position, float process_noise_velocity, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
thrust::default_random_engine rng;
rng.discard(i);
thrust::normal_distribution<float> dist_noise_pos(0.0f, process_noise_position);
thrust::normal_distribution<float> dist_noise_vel(0.0f, process_noise_velocity);
glm::vec4 process_noise(dist_noise_pos(rng), dist_noise_pos(rng), dist_noise_vel(rng), dist_noise_vel(rng));
particle_array[i].state = transition_matrix * particle_array[i].state + process_noise;
particle_array[i].weight = p_S * particle_array[i].weight;
float x = particle_array[i].state[0];
float y = particle_array[i].state[1];
if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
{
thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size);
thrust::normal_distribution<float> dist_vel(0.0f, 12.0);
const int index = dist_idx(rng);
x = index % grid_size;
y = index / grid_size;
particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
}
int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
particle_array[i].grid_cell_idx = pos_x + grid_size * pos_y;
//printf("X: %d, Y: %d, Cell index: %d\n", pos_x, pos_y, (pos_x + grid_size * pos_y));
}
}
|
3cde093a586a0c3a45a325a91408ca01e07547c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define NUM_THREADS 10
#define N 10
// first argument: data type of texture elements
// second argument: types of texture reference which can be one-dimensional, two-dimensional...
// third argument: read mode, optional
texture <float, 1, hipReadModeElementType> textureRef;
__global__ void gpu_texture_memory(int n, float *d_out)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N) {
float temp = tex1D(textureRef, float(idx));
d_out[idx] = temp;
}
}
int main()
{
int num_blocks = N / NUM_THREADS + ((N % NUM_THREADS) ? 1 : 0);
float *d_out;
hipMalloc((void**)&d_out, sizeof(float)*N);
float h_out[N], h_in[N];
for (int i = 0; i < N; i++) {
h_in[i] = float(i);
}
// Define a CUDA array, which is dedicated to texture fetches (unlike a normal linear device array)
hipArray *cu_array;
hipMallocArray(&cu_array, &textureRef.channelDesc, N, 1);
// copy data to cuda array
// 0, 0 meaning starting from the top left corner
hipMemcpyToArray(cu_array, 0, 0, h_in, N*sizeof(float), hipMemcpyHostToDevice);
// bind a texture to the CUDA array
hipBindTextureToArray(textureRef, cu_array);
gpu_texture_memory << <num_blocks, NUM_THREADS>> > (N, d_out);
hipMemcpy(h_out, d_out, N*sizeof(float), hipMemcpyDeviceToHost);
printf("Use of texture memory on GPU: \n");
for (int i = 0; i < N; i++) {
printf("Texture element at %d is: %f\n", i, h_out[i]);
}
hipFree(d_out);
hipFreeArray(cu_array);
hipUnbindTexture(textureRef);
}
|
3cde093a586a0c3a45a325a91408ca01e07547c5.cu
|
#include <cstdio>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#define NUM_THREADS 10
#define N 10
// first argument: data type of texture elements
// second argument: types of texture reference which can be one-dimensional, two-dimensional...
// third argument: read mode, optional
texture <float, 1, cudaReadModeElementType> textureRef;
__global__ void gpu_texture_memory(int n, float *d_out)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N) {
float temp = tex1D(textureRef, float(idx));
d_out[idx] = temp;
}
}
int main()
{
int num_blocks = N / NUM_THREADS + ((N % NUM_THREADS) ? 1 : 0);
float *d_out;
cudaMalloc((void**)&d_out, sizeof(float)*N);
float h_out[N], h_in[N];
for (int i = 0; i < N; i++) {
h_in[i] = float(i);
}
// Define a CUDA array, which is dedicated to texture fetches (unlike a normal linear device array)
cudaArray *cu_array;
cudaMallocArray(&cu_array, &textureRef.channelDesc, N, 1);
// copy data to cuda array
// 0, 0 meaning starting from the top left corner
cudaMemcpyToArray(cu_array, 0, 0, h_in, N*sizeof(float), cudaMemcpyHostToDevice);
// bind a texture to the CUDA array
cudaBindTextureToArray(textureRef, cu_array);
gpu_texture_memory << <num_blocks, NUM_THREADS>> > (N, d_out);
cudaMemcpy(h_out, d_out, N*sizeof(float), cudaMemcpyDeviceToHost);
printf("Use of texture memory on GPU: \n");
for (int i = 0; i < N; i++) {
printf("Texture element at %d is: %f\n", i, h_out[i]);
}
cudaFree(d_out);
cudaFreeArray(cu_array);
cudaUnbindTexture(textureRef);
}
|
3bbbcdfafc534fba42c240d1c02e97e66ef066c6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zge3pt.cu, normal z -> s, Mon Jun 25 18:24:26 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// 3-pt stencil kernel
__global__ void
sge3pt_kernel(
int num_rows,
int num_cols,
float alpha,
float beta,
float * dx,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if( row >= num_rows ){
return;
} else {
for( int i=0; i<num_cols; i++ ){
if (row == num_rows-1) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ])
+ beta * dy[ row+i*num_rows ] ;
} else if(row == 0) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
} else {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ] + dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
}
}
}
}
/**
Purpose
-------
This routine is a 3-pt-stencil operator derived from a FD-scheme in 2D
with Dirichlet boundary.
It computes y_i = alpha * ( -2 x_i + x_{i-1} + x_{i+1} ) + beta * y_i
Arguments
---------
@param[in]
m magma_int_t
number of rows in x and y
@param[in]
n magma_int_t
number of columns in x and y
@param[in]
alpha float
scalar multiplier
@param[in]
beta float
scalar multiplier
@param[in]
dx magmaFloat_ptr
input vector x
@param[out]
dy magmaFloat_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sge3pt(
magma_int_t m,
magma_int_t n,
float alpha,
float beta,
magmaFloat_ptr dx,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sge3pt_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, beta, dx, dy );
return MAGMA_SUCCESS;
}
|
3bbbcdfafc534fba42c240d1c02e97e66ef066c6.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zge3pt.cu, normal z -> s, Mon Jun 25 18:24:26 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// 3-pt stencil kernel
__global__ void
sge3pt_kernel(
int num_rows,
int num_cols,
float alpha,
float beta,
float * dx,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if( row >= num_rows ){
return;
} else {
for( int i=0; i<num_cols; i++ ){
if (row == num_rows-1) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ])
+ beta * dy[ row+i*num_rows ] ;
} else if(row == 0) {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
} else {
dy[ row+i*num_rows ] = alpha * (- 2 * dx[ row+i*num_rows ]
+ dx[ row+i*num_rows-1 ] + dx[ row+i*num_rows+1 ])
+ beta * dy[ row+i*num_rows ] ;
}
}
}
}
/**
Purpose
-------
This routine is a 3-pt-stencil operator derived from a FD-scheme in 2D
with Dirichlet boundary.
It computes y_i = alpha * ( -2 x_i + x_{i-1} + x_{i+1} ) + beta * y_i
Arguments
---------
@param[in]
m magma_int_t
number of rows in x and y
@param[in]
n magma_int_t
number of columns in x and y
@param[in]
alpha float
scalar multiplier
@param[in]
beta float
scalar multiplier
@param[in]
dx magmaFloat_ptr
input vector x
@param[out]
dy magmaFloat_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sge3pt(
magma_int_t m,
magma_int_t n,
float alpha,
float beta,
magmaFloat_ptr dx,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sge3pt_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, beta, dx, dy );
return MAGMA_SUCCESS;
}
|
9574e2d9b0ffaaad582c03643c662df459bc341d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Submission should be named as <RollNumber>_Prog.cu
//Upload just this cu file and nothing else. If you upload it as a zip, it will not be evaluated.
#include <stdio.h>
#define M 514
//Input has 514 rows and columns
#define N 512
//For output, only 512 rows and columns need to be computed.
//TODO: WRITE GPU KERNEL. It should not be called repeatedly from the host, but just once. Each time it is called, it may process more than one pixel or not process any pixel at all.
//Code to process 514*514 input elements using 9*48 threads
//Additional checks included to skip processing boundary elements (only 512*512 to be computed)
// i> 513 : Skip Top row ; i < 263682 : Skip Bottom row
// i%514=0 : Skip left most column ; (i+1)%514=0 : Skip Right most column
__global__ void computeOutput( int *a, int *b, int size) {
int numThrds = 9*48;
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
for (int i=threadID;i<size;i+=numThrds) {
if ((i > 513) && (i < 263682 ) && (i%514 != 0 ) && ((i+1)%514 != 0) ){
b[i] = (a[i-1]+a[i+1]+a[i-514]+a[i+514])/4;
}
}
}
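/*
   Worked example (added for clarity, not part of the original source): for the flattened index
   i = 515 the element sits at row i/514 = 1, column i%514 = 1, an interior point, so
   b[515] = (a[514] + a[516] + a[1] + a[1029]) / 4, i.e. the average of its left, right, upper and
   lower neighbours in the 514x514 grid.
*/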
int main(int argc, char **argv) {
int A[M][M], B[M][M];
int *d_A, *d_B; // These are the copies of A and B on the GPU
int *h_B; // This is a host copy of the output of B from the GPU
int i, j;
// Input is randomly generated
for(i=0;i<M;i++) {
for(j=0;j<M;j++) {
A[i][j] = rand()/1795831;
//printf("%d\n",A[i][j]);
}
}
// sequential implementation of main computation
for(i=1;i<M-1;i++) {
for(j=1;j<M-1;j++) {
B[i][j] = (A[i-1][j]+A[i+1][j]+A[i][j-1]+A[i][j+1])/4;
}
}
// TODO: ALLOCATE MEMORY FOR GPU COPIES OF d_A AND d_B
hipMalloc((void **) &d_A, M*M*sizeof(int));
hipMalloc((void **) &d_B, M*M*sizeof(int));
int insize = M*M*sizeof(int);
// TODO: COPY A TO d_A
hipMemcpy(d_A, A, insize, hipMemcpyHostToDevice);
// TODO: CREATE BLOCKS with THREADS AND INVOKE GPU KERNEL
//Use 9 blocks, each with 48 threads
hipLaunchKernelGGL(( computeOutput), dim3(9),dim3(48), 0, 0, d_A,d_B,M*M);
hipDeviceSynchronize();
// TODO: COPY d_B BACK FROM GPU to CPU in variable h_B
h_B = (int *) malloc(insize);
hipMemcpy(h_B, d_B, insize, hipMemcpyDeviceToHost);
// TODO: Verify result is correct by comparing
int error_cnt=0;
for(i=1;i<M-1;i++) {
for(j=1;j<M-1;j++) {
//print only those elements for which the above subtraction is non-zero
if (B[i][j] - h_B[i*514+j] != 0) {
printf("i: %d ,j: %d , B[i][j]: %d , h_B[i*514+j]: %d \n", i,j,B[i][j],h_B[i*514+j]);
error_cnt=error_cnt+1;
}
}
}
//IF even one element of h_B and B differ, report an error.
//Otherwise, there is no error.
//If your program is correct, no error should occur.
if (error_cnt>0) {
printf("Error: Found %d discrepancies between CPU and GPU calculation \n", error_cnt);
}
else {
printf("Code completed successfully! \n");
}
}
/*Remember the following guidelines to avoid losing marks
Index of an array should not exceed the array size.
Do not ignore the fact that boundary rows and columns need not be computed (in fact, they cannot be computed since they don't have four neighbors)
No output array-element should be computed more than once
No marks will be given if the program does not compile or run (TAs will not debug your program at all)
*/
|
9574e2d9b0ffaaad582c03643c662df459bc341d.cu
|
//Submission should be named as <RollNumber>_Prog.cu
//Upload just this cu file and nothing else. If you upload it as a zip, it will not be evaluated.
#include <stdio.h>
#define M 514
//Input has 514 rows and columns
#define N 512
//For output, only 512 rows and columns need to be computed.
//TODO: WRITE GPU KERNEL. It should not be called repeatedly from the host, but just once. Each time it is called, it may process more than one pixel or not process any pixel at all.
//Code to process 514*514 input elements using 9*48 threads
//Additional checks included to skip processing boundary elements (only 512*512 to be computed)
// i> 513 : Skip Top row ; i < 263682 : Skip Bottom row
// i%514=0 : Skip left most column ; (i+1)%514=0 : Skip Right most column
__global__ void computeOutput( int *a, int *b, int size) {
int numThrds = 9*48;
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
for (int i=threadID;i<size;i+=numThrds) {
if ((i > 513) && (i < 263682 ) && (i%514 != 0 ) && ((i+1)%514 != 0) ){
b[i] = (a[i-1]+a[i+1]+a[i-514]+a[i+514])/4;
}
}
}
int main(int argc, char **argv) {
int A[M][M], B[M][M];
int *d_A, *d_B; // These are the copies of A and B on the GPU
int *h_B; // This is a host copy of the output of B from the GPU
int i, j;
// Input is randomly generated
for(i=0;i<M;i++) {
for(j=0;j<M;j++) {
A[i][j] = rand()/1795831;
//printf("%d\n",A[i][j]);
}
}
// sequential implementation of main computation
for(i=1;i<M-1;i++) {
for(j=1;j<M-1;j++) {
B[i][j] = (A[i-1][j]+A[i+1][j]+A[i][j-1]+A[i][j+1])/4;
}
}
// TODO: ALLOCATE MEMORY FOR GPU COPIES OF d_A AND d_B
cudaMalloc((void **) &d_A, M*M*sizeof(int));
cudaMalloc((void **) &d_B, M*M*sizeof(int));
int insize = M*M*sizeof(int);
// TODO: COPY A TO d_A
cudaMemcpy(d_A, A, insize, cudaMemcpyHostToDevice);
// TODO: CREATE BLOCKS with THREADS AND INVOKE GPU KERNEL
//Use 9 blocks, each with 48 threads
computeOutput<<<9,48>>>(d_A,d_B,M*M);
cudaDeviceSynchronize();
// TODO: COPY d_B BACK FROM GPU to CPU in variable h_B
h_B = (int *) malloc(insize);
cudaMemcpy(h_B, d_B, insize, cudaMemcpyDeviceToHost);
// TODO: Verify result is correct by comparing
int error_cnt=0;
for(i=1;i<M-1;i++) {
for(j=1;j<M-1;j++) {
//print only those elements for which the above subtraction is non-zero
if (B[i][j] - h_B[i*514+j] != 0) {
printf("i: %d ,j: %d , B[i][j]: %d , h_B[i*514+j]: %d \n", i,j,B[i][j],h_B[i*514+j]);
error_cnt=error_cnt+1;
}
}
}
//IF even one element of h_B and B differ, report an error.
//Otherwise, there is no error.
//If your program is correct, no error should occur.
if (error_cnt>0) {
printf("Error: Found %d discrepancies between CPU and GPU calculation \n", error_cnt);
}
else {
printf("Code completed successfully! \n");
}
}
/*Remember the following guidelines to avoid losing marks
Index of an array should not exceed the array size.
Do not ignore the fact that boundary rows and columns need not be computed (in fact, they cannot be computed since they don't have four neighbors)
No output array-element should be computed more than once
No marks will be given if the program does not compile or run (TAs will not debug your program at all)
*/
|
b84de7d2b4180c17f78acdefd6e3875803a76c18.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/simd_functions.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace arithm
{
template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 1 };
};
template <> struct ArithmFuncTraits<1, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
struct VAdd4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd4(a, b);
}
__host__ __device__ __forceinline__ VAdd4() {}
__host__ __device__ __forceinline__ VAdd4(const VAdd4&) {}
};
////////////////////////////////////
struct VAdd2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd2(a, b);
}
__host__ __device__ __forceinline__ VAdd2() {}
__host__ __device__ __forceinline__ VAdd2(const VAdd2&) {}
};
////////////////////////////////////
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a + b);
}
__host__ __device__ __forceinline__ AddMat() {}
__host__ __device__ __forceinline__ AddMat(const AddMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAdd2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void addMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VAdd4(), WithOutMask(), stream);
}
void addMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VAdd2(), WithOutMask(), stream);
}
template <typename T, typename D>
void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
}
template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
{
S val;
explicit AddScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a + val);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
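// VSub4/VSub2: packed SIMD subtraction of four byte lanes / two half-word lanes stored in a single 32-bit word.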
struct VSub4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub4(a, b);
}
__host__ __device__ __forceinline__ VSub4() {}
__host__ __device__ __forceinline__ VSub4(const VSub4&) {}
};
////////////////////////////////////
struct VSub2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub2(a, b);
}
__host__ __device__ __forceinline__ VSub2() {}
__host__ __device__ __forceinline__ VSub2(const VSub2&) {}
};
////////////////////////////////////
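// SubMat: generic per-element subtraction with saturation to the destination type.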
template <typename T, typename D> struct SubMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a - b);
}
__host__ __device__ __forceinline__ SubMat() {}
__host__ __device__ __forceinline__ SubMat(const SubMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
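// Vectorized entry points for data packed into uint words (four 8-bit or two 16-bit lanes per word); these paths take no mask.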
void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VSub4(), WithOutMask(), stream);
}
void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VSub2(), WithOutMask(), stream);
}
template <typename T, typename D>
void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
}
template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
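// Subtracting a scalar reuses AddScalar with the negated value, so no separate functor is needed.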
template <typename T, typename S, typename D>
void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
AddScalar<T, S, D> op(-static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
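// Mul_8uc4_32f: multiplies each of the four uchar channels packed in a uint by a float factor,
// saturating every channel independently; Mul_16sc4_32f below does the same for short4.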
struct Mul_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
return res;
}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a * b);
}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
{
S scale;
explicit MulScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(scale * a * b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
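// When scale == 1 the plain Mul functor is used to avoid the extra multiply; otherwise MulScale folds the factor in.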
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#endif
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
namespace arithm
{
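// MulScalar: multiplies every element by a scalar held at working precision S and saturates to D.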
template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
{
S val;
explicit MulScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a * val);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divMat
namespace arithm
{
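// Div_8uc4_32f: packed uchar4 division by a float; a zero divisor yields 0 for all four channels,
// otherwise the divisor is inverted once and applied as a per-channel multiply.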
struct Div_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
if (b != 0)
{
b = 1.0f / b;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
}
return res;
}
};
struct Div_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
: make_short4(0,0,0,0);
}
};
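// Div: per-element division where a zero divisor produces 0; the float and double
// specializations return the quotient directly, without saturate_cast.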
template <typename T, typename D> struct Div : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(a / b) : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, float> : binary_function<T, T, float>
{
__device__ __forceinline__ float operator ()(T a, T b) const
{
return b != 0 ? static_cast<float>(a) / b : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, double> : binary_function<T, T, double>
{
__device__ __forceinline__ double operator ()(T a, T b) const
{
return b != 0 ? static_cast<double>(a) / b : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
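// DivScale: scaled division, dst = scale * a / b, again mapping b == 0 to 0.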
template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
{
S scale;
explicit DivScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
}
void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
}
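// As in mulMat, the unscaled Div functor is chosen when scale == 1.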
template <typename T, typename S, typename D>
void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Div<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#endif
template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divScalar
namespace arithm
{
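// Division by a scalar is folded into multiplication by its reciprocal (1.0 / val), reusing MulScalar;
// note that a zero scalar is not special-cased here.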
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divInv
namespace arithm
{
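// DivInv: 'inverse' division dst = val / a, with a == 0 mapped to 0.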
template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
{
S val;
explicit DivInv(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return a != 0 ? saturate_cast<D>(val / a) : 0;
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
DivInv<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
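// Per-element absolute difference of two matrices.
// VAbsDiff4 / VAbsDiff2 use the vabsdiff4 / vabsdiff2 SIMD helpers to process four 8-bit
// (resp. two 16-bit) lanes packed in one uint per thread; AbsDiffMat<T> is the generic
// per-element path and saturates the result back to T.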
namespace arithm
{
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
////////////////////////////////////
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
////////////////////////////////////
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
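// Per-element |src - val|: the subtraction and abs_func are evaluated in the work type S
// (float, or double for double sources) and the result is saturated back to T.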
namespace arithm
{
template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
{
S val;
explicit AbsDiffScalar(S val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
abs_func<S> f;
return saturate_cast<T>(f(a - val));
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T, typename S>
void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
AbsDiffScalar<T, S> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
}
template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absMat
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void absMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, abs_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
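// Sqr<T> squares each element and saturates x * x back to T.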
namespace arithm
{
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
return saturate_cast<T>(x * x);
}
__host__ __device__ __forceinline__ Sqr() {}
__host__ __device__ __forceinline__ Sqr(const Sqr&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void sqrMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Sqr<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void sqrtMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, sqrt_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void logMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, log_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// expMat
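// Exp<T> wraps exp_func<T> and saturates its result back to T.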
namespace arithm
{
template <typename T> struct Exp : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
exp_func<T> f;
return saturate_cast<T>(f(x));
}
__host__ __device__ __forceinline__ Exp() {}
__host__ __device__ __forceinline__ Exp(const Exp&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void expMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Exp<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
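// Per-element comparison producing a uchar mask (255 where the predicate holds, 0 otherwise).
// The VCmp*4 functors compare four packed 8-bit lanes per uint via the vcmp*4 helpers.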
namespace arithm
{
struct VCmpEq4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmpeq4(a, b);
}
__host__ __device__ __forceinline__ VCmpEq4() {}
__host__ __device__ __forceinline__ VCmpEq4(const VCmpEq4&) {}
};
struct VCmpNe4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmpne4(a, b);
}
__host__ __device__ __forceinline__ VCmpNe4() {}
__host__ __device__ __forceinline__ VCmpNe4(const VCmpNe4&) {}
};
struct VCmpLt4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmplt4(a, b);
}
__host__ __device__ __forceinline__ VCmpLt4() {}
__host__ __device__ __forceinline__ VCmpLt4(const VCmpLt4&) {}
};
struct VCmpLe4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmple4(a, b);
}
__host__ __device__ __forceinline__ VCmpLe4() {}
__host__ __device__ __forceinline__ VCmpLe4(const VCmpLe4&) {}
};
////////////////////////////////////
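// Generic comparison: Op returns a bool, and -op(a, b) maps true/false to the 0xFF / 0x00
// mask values used for comparison results
// (e.g. Cmp<equal_to<int>, int>()(3, 3) yields 255, Cmp<equal_to<int>, int>()(3, 4) yields 0).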
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T a, T b) const
{
Op op;
return -op(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpNe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpLt4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpLe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VCmpEq4(), WithOutMask(), stream);
}
void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VCmpNe4(), WithOutMask(), stream);
}
void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VCmpLt4(), WithOutMask(), stream);
}
void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VCmpLe4(), WithOutMask(), stream);
}
template <template <typename> class Op, typename T>
void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
Cmp<Op<T>, T> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
}
template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<not_equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<less, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<less_equal, T>(src1, src2, dst, stream);
}
template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
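// Compare each element (or each channel of a multi-channel element) against a scalar.
// CmpScalar is specialized for 1-4 channels and applies Cmp lane-wise to build the
// corresponding uchar1..uchar4 mask.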
namespace arithm
{
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
template <class Op, typename T, int cn> struct CmpScalar;
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
const T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
const TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
const TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
const TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace device
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
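// val always carries four components; VecTraits<src_t>::make(sval) below reads only the
// first cn of them, so the unused slots are harmless.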
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
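// Bitwise NOT/AND/OR/XOR between matrices, optionally under a mask
// (SingleMaskChannels lets a single-channel mask gate all num_channels values of a pixel).
// Only one unsigned type per element width is instantiated, since the result depends only
// on the bit pattern.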
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
}
template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
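// Bitwise AND/OR/XOR with a scalar: bind2nd fixes the scalar as the second operand,
// turning the binary bit functor into a per-element unary one.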
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
}
template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarAnd<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarAnd<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarOr<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarOr<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarXor<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
template void bitScalarXor<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// min
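// Per-element minimum. VMin4 / VMin2 use vmin4 / vmin2 to take the minimum of four 8-bit
// (resp. two 16-bit) lanes packed in a uint; minimum<T> handles the generic case and
// minScalar binds the scalar as the second operand via bind2nd.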
namespace arithm
{
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
////////////////////////////////////
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
#ifdef OPENCV_TINY_GPU_MODULE
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#else
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// max
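// Per-element maximum; mirrors the min section above with vmax4 / vmax2 and maximum<T>.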
namespace arithm
{
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
////////////////////////////////////
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
#endif
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#endif
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// threshold
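// The callers table below is indexed by the threshold type, in the usual OpenCV order:
// THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV.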
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T>
void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, WithOutMask(), stream);
}
template <typename T>
void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream);
static const caller_t callers[] =
{
threshold_caller<thresh_binary_func, T>,
threshold_caller<thresh_binary_inv_func, T>,
threshold_caller<thresh_trunc_func, T>,
threshold_caller<thresh_to_zero_func, T>,
threshold_caller<thresh_to_zero_inv_func, T>
};
callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
}
template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
#endif
template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// pow
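// PowOp dispatches on the signedness of T. The default (unsigned) path computes
// __powf((float)e, power) and saturates; the signed-integer specialization additionally
// flips the sign of the result when the base is negative and the truncated exponent is odd;
// the float and double specializations raise |e| to the power in their native precision.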
namespace arithm
{
template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
return saturate_cast<T>(__powf((float)e, power));
}
};
template<typename T> struct PowOp<T, true> : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
T res = saturate_cast<T>(__powf((float)e, power));
if ((e < 0) && (1 & static_cast<int>(power)))
res *= -1;
return res;
}
};
template<> struct PowOp<float> : unary_function<float, float>
{
const float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
return __powf(::fabs(e), power);
}
};
template<> struct PowOp<double> : unary_function<double, double>
{
double power;
PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
return ::pow(::fabs(e), power);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template<typename T>
void pow(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, PowOp<T>(power), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
#endif
template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
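// dst = saturate(src1 * alpha + src2 * beta + gamma).
// UseDouble selects double-precision coefficients and arithmetic only when one of the
// involved types is double; otherwise the weights are stored and applied as float.
// AddWeightedTraits enables the vectorized ArithmFuncTraits only when both sources have
// the same element size.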
namespace arithm
{
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
#endif
}
#endif /* CUDA_DISABLER */
|
b84de7d2b4180c17f78acdefd6e3875803a76c18.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/simd_functions.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace arithm
{
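// ArithmFuncTraits supplies the launch configuration consumed by the templated
// transform() kernels: block dimensions for the simple path and smart_shift,
// which (as I read the transform machinery) sets how many elements each thread
// of the vectorised path handles. It is specialised below for every
// combination of source/destination element sizes of 1, 2 and 4 bytes.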
template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 1 };
};
template <> struct ArithmFuncTraits<1, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
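// VAdd4 adds four 8-bit lanes packed into one 32-bit word in a single
// operation (SIMD-in-register via vadd4 from simd_functions.hpp); it backs the
// vectorised addMat_v4 entry point below.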
struct VAdd4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd4(a, b);
}
__host__ __device__ __forceinline__ VAdd4() {}
__host__ __device__ __forceinline__ VAdd4(const VAdd4&) {}
};
////////////////////////////////////
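// VAdd2 is the 2 x 16-bit counterpart, used by addMat_v2.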
struct VAdd2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd2(a, b);
}
__host__ __device__ __forceinline__ VAdd2() {}
__host__ __device__ __forceinline__ VAdd2(const VAdd2&) {}
};
////////////////////////////////////
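// AddMat is the generic per-element functor: the sum is clamped to the
// destination type with saturate_cast.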
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a + b);
}
__host__ __device__ __forceinline__ AddMat() {}
__host__ __device__ __forceinline__ AddMat(const AddMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
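// These empty specialisations plug the functors into the transform framework,
// which pulls its block-size and shift settings from ArithmFuncTraits.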
template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAdd2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
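// addMat_v4 / addMat_v2 are fast paths that treat each row as packed 32-bit
// words (4 x 8-bit or 2 x 16-bit lanes per word); the generic addMat below
// covers every other type combination, with or without a mask.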
void addMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VAdd4(), WithOutMask(), stream);
}
void addMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VAdd2(), WithOutMask(), stream);
}
template <typename T, typename D>
void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
}
template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
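// AddScalar adds a scalar, held in the intermediate type S (float or double in
// the instantiations below), to every element and saturates the result to D.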
template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
{
S val;
explicit AddScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a + val);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
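// Host-side wrapper: builds the functor from the double scalar and dispatches
// to the masked or unmasked transform.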
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
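// Subtraction mirrors the addition path: VSub4 / VSub2 handle packed 8- and
// 16-bit lanes, and SubMat is the generic saturating per-element functor.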
struct VSub4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub4(a, b);
}
__host__ __device__ __forceinline__ VSub4() {}
__host__ __device__ __forceinline__ VSub4(const VSub4&) {}
};
////////////////////////////////////
struct VSub2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub2(a, b);
}
__host__ __device__ __forceinline__ VSub2() {}
__host__ __device__ __forceinline__ VSub2(const VSub2&) {}
};
////////////////////////////////////
template <typename T, typename D> struct SubMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a - b);
}
__host__ __device__ __forceinline__ SubMat() {}
__host__ __device__ __forceinline__ SubMat(const SubMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VSub4(), WithOutMask(), stream);
}
void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VSub2(), WithOutMask(), stream);
}
template <typename T, typename D>
void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
}
template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
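// subScalar has no functor of its own: it reuses AddScalar with the negated
// scalar value.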
template <typename T, typename S, typename D>
void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(-static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
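    // Mul_8uc4_32f treats the uint operand as four packed 8-bit channels: each byte is
    // multiplied by the float factor and repacked with saturation. Mul_16sc4_32f does the
    // same for the four channels of a short4 value.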
struct Mul_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
return res;
}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a * b);
}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
{
S scale;
explicit MulScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(scale * a * b);
}
};
}
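// The TransformFunctorTraits specializations (here and in the sections below) tell the
// generic cv::gpu::device::transform() kernel how to configure itself for each functor,
// with ArithmFuncTraits choosing block dimensions and per-thread granularity from the
// operand sizes.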
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
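    // Generic element-wise multiply: T is the source type, D the destination type, and S
    // the type used for the scale factor. When scale == 1 the plain Mul functor avoids
    // the extra multiplication; otherwise MulScale folds the factor into every product
    // before saturating to D.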
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
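    // A minimal host-side usage sketch (not part of this file), assuming the
    // cv::gpu::multiply() wrapper that normally dispatches into these kernels:
    //
    //     cv::gpu::GpuMat a, b, c;                 // e.g. CV_8UC1 inputs
    //     cv::gpu::multiply(a, b, c, 0.5);         // scale factor 0.5
    //     // expected to reach arithm::mulMat<uchar, float, uchar>(a, b, c, 0.5, stream)
    //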
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#endif
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
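// mulScalar multiplies every element by a scalar held in the intermediate type S
// (float or double) and saturates the product to the destination type D.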
namespace arithm
{
template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
{
S val;
explicit MulScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a * val);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divMat
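// Element-wise division. Every Div functor maps division by zero to 0; the float and
// double specializations return the quotient directly, without saturate_cast.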
namespace arithm
{
struct Div_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
if (b != 0)
{
b = 1.0f / b;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
}
return res;
}
};
struct Div_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
: make_short4(0,0,0,0);
}
};
template <typename T, typename D> struct Div : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(a / b) : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, float> : binary_function<T, T, float>
{
__device__ __forceinline__ float operator ()(T a, T b) const
{
return b != 0 ? static_cast<float>(a) / b : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, double> : binary_function<T, T, double>
{
__device__ __forceinline__ double operator ()(T a, T b) const
{
return b != 0 ? static_cast<double>(a) / b : 0;
}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
{
S scale;
explicit DivScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
}
void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
}
template <typename T, typename S, typename D>
void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
{
if (scale == 1)
{
Div<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#endif
template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divScalar
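// divScalar reuses MulScalar: src / val is computed as src * (1.0 / val), with the
// reciprocal formed once on the host. Note that val == 0 is not special-cased here,
// unlike the element-wise divMat path above.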
namespace arithm
{
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// divInv
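// divInv handles the "scalar divided by matrix" case: each output element is val / src,
// with zero source elements mapped to 0.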
namespace arithm
{
template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
{
S val;
explicit DivInv(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return a != 0 ? saturate_cast<D>(val / a) : 0;
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
DivInv<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
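// absDiffMat_v4 / absDiffMat_v2 rely on the SIMD-in-word helpers vabsdiff4 / vabsdiff2
// to process four packed bytes or two packed 16-bit values per 32-bit word; the generic
// AbsDiffMat path uses the per-type _abs helpers with saturation.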
namespace arithm
{
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
////////////////////////////////////
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
////////////////////////////////////
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
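// absDiffScalar computes |src - val| with the subtraction carried out in the wider
// type S before the result is saturated back to the source type T.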
namespace arithm
{
template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
{
S val;
explicit AbsDiffScalar(S val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
abs_func<S> f;
return saturate_cast<T>(f(a - val));
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T, typename S>
void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
AbsDiffScalar<T, S> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
}
template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// absMat
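// absMat and the sqrMat / sqrtMat / logMat / expMat sections that follow are simple
// element-wise unary transforms: each wraps the corresponding functor in transform()
// and saturates the result to the input type where needed.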
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void absMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, abs_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
namespace arithm
{
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
return saturate_cast<T>(x * x);
}
__host__ __device__ __forceinline__ Sqr() {}
__host__ __device__ __forceinline__ Sqr(const Sqr&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void sqrMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Sqr<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void sqrtMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, sqrt_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void logMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, log_func<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// expMat
namespace arithm
{
template <typename T> struct Exp : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
exp_func<T> f;
return saturate_cast<T>(f(x));
}
__host__ __device__ __forceinline__ Exp() {}
__host__ __device__ __forceinline__ Exp(const Exp&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T>
void expMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Exp<T>(), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
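// Element-wise comparisons. The VCmp*4 functors compare four packed bytes at a time and
// produce 0x00 / 0xFF per byte; the generic Cmp functor below produces one uchar mask
// value per element.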
namespace arithm
{
struct VCmpEq4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmpeq4(a, b);
}
__host__ __device__ __forceinline__ VCmpEq4() {}
__host__ __device__ __forceinline__ VCmpEq4(const VCmpEq4&) {}
};
struct VCmpNe4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmpne4(a, b);
}
__host__ __device__ __forceinline__ VCmpNe4() {}
__host__ __device__ __forceinline__ VCmpNe4(const VCmpNe4&) {}
};
struct VCmpLt4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmplt4(a, b);
}
__host__ __device__ __forceinline__ VCmpLt4() {}
__host__ __device__ __forceinline__ VCmpLt4(const VCmpLt4&) {}
};
struct VCmpLe4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vcmple4(a, b);
}
__host__ __device__ __forceinline__ VCmpLe4() {}
__host__ __device__ __forceinline__ VCmpLe4(const VCmpLe4&) {}
};
////////////////////////////////////
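    // Cmp negates the bool result of Op so that "true" becomes -1, i.e. 255 once stored
    // in the uchar mask, which is the value OpenCV comparison results use.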
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T a, T b) const
{
Op op;
return -op(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpNe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpLt4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VCmpLe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VCmpEq4(), WithOutMask(), stream);
}
void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VCmpNe4(), WithOutMask(), stream);
}
void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VCmpLt4(), WithOutMask(), stream);
}
void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VCmpLe4(), WithOutMask(), stream);
}
template <template <typename> class Op, typename T>
void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
Cmp<Op<T>, T> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
}
template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<not_equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<less, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<less_equal, T>(src1, src2, dst, stream);
}
template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
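// CmpScalar binds a per-channel scalar to a comparison op; the TYPE_VEC macro expands to the
// CUDA vector type (e.g. uchar3, float4) for the given element type and channel count, and the
// 1..4 channel specializations apply Cmp<Op, T> component-wise against the stored scalar.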
namespace arithm
{
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
template <class Op, typename T, int cn> struct CmpScalar;
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
const T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
const TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
const TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
const TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace device
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
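    // Each cmpScalar* wrapper dispatches on the channel count: funcs[0] is an unused placeholder
    // so that funcs[cn] can be indexed directly with cn in 1..4.
    // Hypothetical host-side usage for an 8UC3 "equal to scalar" comparison:
    //   double val[4] = {128.0, 64.0, 32.0, 0.0};
    //   arithm::cmpScalarEq<uchar>(src, 3, val, dst, stream);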
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
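// The bitwise-NOT/AND/OR/XOR kernels operate on unsigned element types chosen by element size
// (uchar, ushort, uint); an optional mask is honoured via SingleMaskChannels, otherwise the
// unmasked WithOutMask path is used.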
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), SingleMaskChannels(mask, num_channels), stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
}
template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, int num_channels, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
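// bitScalar* applies a bitwise op against a constant: bind2nd fixes the scalar as the second
// operand of bit_and/bit_or/bit_xor, turning the binary functor into a unary per-pixel transform.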
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
}
template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarAnd<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarAnd<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// min
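// VMin4 / VMin2 wrap the SIMD-within-a-word intrinsics vmin4/vmin2, taking per-byte and
// per-halfword minima of four uchars / two ushorts packed into one 32-bit word; minMat_v4 and
// minMat_v2 run them over rows reinterpreted as uint, while the generic minMat<T> and
// minScalar<T> fall back to minimum<T> per element.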
namespace arithm
{
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
////////////////////////////////////
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
#ifdef OPENCV_TINY_GPU_MODULE
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#else
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// max
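// Mirror of the min section above: VMax4 / VMax2 wrap vmax4/vmax2 for packed uchar/ushort words,
// and maxMat<T> / maxScalar<T> use maximum<T> element-wise.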
namespace arithm
{
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
////////////////////////////////////
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#endif
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// threshold
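// The five threshold functors correspond, in order, to THRESH_BINARY, THRESH_BINARY_INV,
// THRESH_TRUNC, THRESH_TOZERO and THRESH_TOZERO_INV, so threshold<T>() can index callers[]
// directly with the OpenCV threshold type value.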
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template <template <typename> class Op, typename T>
void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, WithOutMask(), stream);
}
template <typename T>
void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream);
static const caller_t callers[] =
{
threshold_caller<thresh_binary_func, T>,
threshold_caller<thresh_binary_inv_func, T>,
threshold_caller<thresh_trunc_func, T>,
threshold_caller<thresh_to_zero_func, T>,
threshold_caller<thresh_to_zero_inv_func, T>
};
callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
}
template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
#endif
template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// pow
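// PowOp is specialized on signedness and on the floating-point types: the unsigned/base case uses
// the fast __powf intrinsic with saturate_cast, the signed specialization additionally flips the
// sign for negative inputs raised to an odd integral power, and the float/double specializations
// raise the absolute value of the input to the given power (in line with cv::pow's documented
// handling of negative bases).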
namespace arithm
{
template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
return saturate_cast<T>(__powf((float)e, power));
}
};
template<typename T> struct PowOp<T, true> : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
T res = saturate_cast<T>(__powf((float)e, power));
            // negative base raised to an odd (truncated) integer power: flip the sign of the result
            if ((e < 0) && (1 & static_cast<int>(power)))
                res *= -1;
return res;
}
};
template<> struct PowOp<float> : unary_function<float, float>
{
const float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
return __powf(::fabs(e), power);
}
};
template<> struct PowOp<double> : unary_function<double, double>
{
double power;
PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
return ::pow(::fabs(e), power);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
template<typename T>
void pow(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, PowOp<T>(power), WithOutMask(), stream);
}
#ifndef OPENCV_TINY_GPU_MODULE
template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
#endif
template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
#endif
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
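// addWeighted computes dst = src1*alpha + src2*beta + gamma. UseDouble selects double-precision
// accumulation only when one of the involved types is double, otherwise the weights are folded
// to float; AddWeightedTraits reuses ArithmFuncTraits when both source types have the same size
// so the transform kernel can take its vectorized path.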
namespace arithm
{
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
namespace cv { namespace gpu { namespace device
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
#ifndef OPENCV_TINY_GPU_MODULE
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
#endif
}
#endif /* CUDA_DISABLER */
|
dea98a5e1afa65f5689f5fd893107210c670007b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hydro.cuh"
namespace hydro {
__device__
float d_gaussFalloff(float x, float y, float cx, float cy, float radius) {
float val = expf(-((((x - cx)*(x - cx) + (y - cy)*(y - cy)) * 4) / (radius*radius)));
return val < 0.01 ? 0 : val;
}
__device__
bool isOutside(int c, int r, int w, int h) {
return ((c < 0) || (r<0) || (c >= w) || (r >= h));
}
__device__
bool isBorder(int c, int r, int w, int h) {
return ((c == 0) || (r == 0) || (c == (w - 1) || (r == (h - 1))));
}
__global__
void d_erodeWaterHydraulic(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// Prerequisites: call genSlopes() and conduct a water flow update to have the latest WATER_LAST and SLOPE_SIN before execution.
// dissolves material into particles from the top.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//hydraulic erosion
float dissolutionCapacity = 0.001;
if (dissolutionCapacity > WATER[cudaindex]) {
dissolutionCapacity = WATER[cudaindex];
}
if (REGOLITH[cudaindex] > dissolutionCapacity) {
DUST[cudaindex] += REGOLITH[cudaindex] - dissolutionCapacity;
REGOLITH[cudaindex] = dissolutionCapacity;
}
else {
float remainingHydro = dissolutionCapacity - REGOLITH[cudaindex];
for (int i = DLEN - 1; i >= 0; --i) {
const float requestHydro = /*MDATA[MATERIAL[i]]MHYDRO */ remainingHydro;
if (requestHydro > dd_terrainData[i][cudaindex]) {
dd_terrainData[i][cudaindex] = 0;
remainingHydro -= requestHydro;
}
else {
dd_terrainData[i][cudaindex] -= requestHydro;
remainingHydro -= requestHydro;
break;
}
}
REGOLITH[cudaindex] += dissolutionCapacity - remainingHydro - REGOLITH[cudaindex];
}
}
__global__
void d_erodeWaterKinetic(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// Prerequisites: call genSlopes() and conduct a water flow update to have the latest WATER_LAST and SLOPE_SIN before execution.
// dissolves material into particles from the top.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//sediment transport capacity
//S = v * C * sin(alpha)
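//(v: magnitude of the per-cell flow computed below, sin(alpha): the precomputed SLOPE_SIN term, C: the 0.1 capacity constant)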
const float velocity = sqrt(WATER_CELL_VERT[cudaindex] * WATER_CELL_VERT[cudaindex] + WATER_CELL_HOR[cudaindex] * WATER_CELL_HOR[cudaindex]);
//clamp the slope term from below so flowing water still has some transport capacity on flat surfaces.
float transportCapacity = 0.1;
if (SLOPE_SIN[cudaindex] < 0.1) {
transportCapacity *= velocity * 0.1;
}
else {
transportCapacity *= velocity * SLOPE_SIN[cudaindex];
}
const float DISSOLVE = 1;
const float SETTLE = 1;
//compare material levels to capacity
if (SEDIMENT[cudaindex] > transportCapacity) {
//deposit
float delta = SETTLE*(SEDIMENT[cudaindex] - transportCapacity);
DUST[cudaindex] += delta;
SEDIMENT[cudaindex] -= delta;
}
else {
//erode
float delta = DISSOLVE*(transportCapacity - SEDIMENT[cudaindex]);
//start removing material from the terrain.
float remaining = delta - SEDIMENT[cudaindex];
for (int i = DLEN - 1; i >= 0; --i) {
const float request = MDATA[MATERIAL[i]]MHYDRO * remaining;
if(request > dd_terrainData[i][cudaindex]) {
dd_terrainData[i][cudaindex] = 0;
remaining -= request;
}
else {
dd_terrainData[i][cudaindex] -= request;
remaining -= request;
break;
}
}
//
SEDIMENT[cudaindex] += delta-remaining;
}
}
__global__
void d_erodeWaterSemiLag1(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//move material
const float scale = 20;
const float oldx = c - (WATER_CELL_HOR[cudaindex] * scale);
const float oldy = r -(WATER_CELL_VERT[cudaindex] * scale);
const int foldx = floorf(oldx);
const int foldy = floorf(oldy);
//weight with which to pick each
float wx = oldx - foldx;
float wy = oldy - foldy;
float old = (oldy)*DW + (oldx);
const int idx[4] = { (foldy)*DW + (foldx), (foldy)*DW + (foldx+1), (foldy+1)*DW + (foldx), (foldy+1)*DW + (foldx+1) };
const bool valid[4] = { idx[0]>0 && idx[0]<DW*DH, idx[1]>0 && idx[1]<DW*DH, idx[2]>0 && idx[2]<DW*DH, idx[3]>0 && idx[3]<DW*DH };
//
// [foldx, foldy] wx > [foldx+1, foldy]
// wy V
// [foldx, foldy+1] [foldx+1, foldy+1]
//
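// Semi-Lagrangian gather: trace the flow back by one step (scale cells per unit of flow)
// and bilinearly interpolate sediment/regolith from the four surrounding cells,
// with wx/wy as the fractional offsets inside the source cell.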
float amountSediment = 0;
float amountRegolith = 0;
float div = 0; // must start at zero: counts the valid corner cells below
if (valid[0]) { amountSediment += SEDIMENT[idx[0]] * (1 - wy)*(1 - wx);
amountRegolith += REGOLITH[idx[0]] * (1 - wy)*(1 - wx); }
if (valid[1]) { amountSediment += SEDIMENT[idx[1]] * (1 - wy)*(wx);
amountRegolith += REGOLITH[idx[1]] * (1 - wy)*(wx); }
if (valid[2]) { amountSediment += SEDIMENT[idx[2]] * (wy)*(1 - wx);
amountRegolith += REGOLITH[idx[2]] * (wy)*(1 - wx); }
if (valid[3]) { amountSediment += SEDIMENT[idx[3]] * (wy)*(wx);
amountRegolith += REGOLITH[idx[3]] * (wy)*(wx); }
for (unsigned int i = 0; i < 4; ++i) {
if (valid[i]) {
++div;
}
}
//compensate for missing cells
if (div > 0) {
amountSediment *= 4 / div;
d_extra[cudaindex] = amountSediment;
d_extra[DW*DH+cudaindex] = amountRegolith;
}
//
// Continue after CPU synch
// ...
//
}
__global__
void d_erodeWaterSemiLag2(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// just adds up the values computed temporarily
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
SEDIMENT[cudaindex] = d_extra[cudaindex];
REGOLITH[cudaindex] = d_extra[DW*DH + cudaindex];
}
__global__
void d_simWaterA(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 1 / 4 - compute pressure differences for each CELL. OUT: Pass accelerations to pipes
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int offset = (DW*DH);
//add amount from sprinklers
float val = 0;
for (unsigned int i = 0; i < DPOLY_SPR_LEN; ++i) {
val += DSPRINKLER_STR * d_gaussFalloff(c, r, POLY_SPR[i*3] * 2, POLY_SPR[i*3+2] * 2, DSPRINKLER_RADIUS);
}
WATER[cudaindex] += val;
//pipe indexes: WEST, NORTH, EAST, SOUTH
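//d_extra layout in this stage: [0, DW*DH) = horizontal pipe accelerations (stored at the pipe's west cell),
//[DW*DH, 2*DW*DH) = vertical pipe accelerations (stored at the pipe's north cell); d_simWaterB reads them back with the same offsets.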
const int idx[4] = { cudaindex - 1, cudaindex - DW, cudaindex+1, cudaindex + DW };
bool valid[4] = { idx[0] >= 0, idx[1] >= 0, idx[2] < DW*DH, idx[3] < DW*DH };
const int dir[4] = { -1, -1, 1, 1 };
const int pipeidx[4] = { cudaindex - 1, offset + cudaindex - DW, cudaindex, offset + cudaindex };
//bool pipevalid[4] = { pipeidx[0] >= 0 && pipeidx[0]%DW!=DW-1, pipeidx[1] - offset >= 0, pipeidx[2] < DW*DH, pipeidx[3] - offset < DW*(DH-1) };
float dif[4] = { 0,0,0,0 };
//compute
float myh = SUMS[cudaindex] + WATER[cudaindex] + SEDIMENT[cudaindex] + REGOLITH[cudaindex];
for (unsigned char i = 0; i < 4; ++i) {
if (valid[i]) {
dif[i] = myh - SUMS[idx[i]] - WATER[idx[i]] - SEDIMENT[idx[i]] - REGOLITH[idx[i]];
if (dif[i] < 0) { //do not write into higher slots. (only write from above)
dif[i] = 0;
valid[i] = false;
}
}
}
//store accelerations
for (unsigned char i = 0; i < 4; ++i) {
if (valid[i]) {
d_extra[pipeidx[i]] = (float)(dir[i] * dif[i]);
}
}
}
__global__
void d_simWaterB(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 2 / 4 - add accelerations to pipe values
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int offset = DW*DH;
//add accelerations
WATER_HOR[cudaindex] += d_extra[cudaindex];
WATER_VERT[cudaindex] += d_extra[offset + cudaindex];
//nullify if source has less water.
if (WATER_HOR[cudaindex] > 0) {
if (WATER[cudaindex] < WATER_HOR[cudaindex]) { WATER_HOR[cudaindex] = WATER[cudaindex]; }
}
else {
if (WATER[cudaindex+1] < -WATER_HOR[cudaindex]) { WATER_HOR[cudaindex] = -WATER[cudaindex + 1]; }
}
//
if (WATER_VERT[cudaindex] > 0) {
if (WATER[cudaindex] < WATER_VERT[cudaindex]) { WATER_VERT[cudaindex] = WATER[cudaindex]; }
}
else {
if (WATER[cudaindex + DW] < -WATER_VERT[cudaindex]) { WATER_VERT[cudaindex] = -WATER[cudaindex + DW]; }
}
}
__global__
void d_simWaterC(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 3 / 4 - compute transported amounts of water in each cell based on speed in the pipes.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int extraidx = 5 * cudaindex;
const int offset = (DW*DH);
//transfer speeds
const int pipeidx[4] = { cudaindex - 1, cudaindex - DW, cudaindex, cudaindex };
const char pipedir[4] = { -1, -1, 1, 1 };
const bool pipevalid[4] = { pipeidx[0] >= 0 && pipeidx[0] % DW != DW - 1, pipeidx[1] >= 0, pipeidx[2] < DW*DH, pipeidx[3] < DW*(DH - 1) };
const int idx[4] = { cudaindex - 1, cudaindex - DW, cudaindex + 1, cudaindex + DW };
//const bool valid[4] = { idx[0] >= 0, idx[1] >= 0, idx[2] < DW*DH, idx[3] < DW*DH };
float dif[4] = { 0,0,0,0 };
//see demands
float sum = 0;
if (pipevalid[0]) {
dif[0] = -WATER_HOR[pipeidx[0]];
}
if (pipevalid[1]) {
dif[1] = -WATER_VERT[pipeidx[1]];
}
if (pipevalid[2]) {
dif[2] = WATER_HOR[pipeidx[2]];
}
if (pipevalid[3]) {
dif[3] = WATER_VERT[pipeidx[3]];
}
for (unsigned char i = 0; i < 4; ++i) {
if (dif[i] < 0) {
dif[i] = 0;
}
else {
sum += dif[i];
}
}
if (sum == 0) { return; }
float amount = sum;
if (amount > WATER[cudaindex]) {
amount = WATER[cudaindex];
}
amount /= 2;
for (unsigned char i = 0; i < 4; ++i) {
if (pipevalid[i]) {
dif[i] = amount * (dif[i] / sum);
d_extra[idx[i] * 5 + 1 + i] = dif[i];
}
}
d_extra[extraidx] = amount;
//calculate overall flows for use in kinetic hydro-erosion
WATER_CELL_VERT[cudaindex] = dif[3] - dif[1];
WATER_CELL_HOR[cudaindex] = dif[2] - dif[0];
}
__global__
void d_simWaterD(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 4 / 4 - Add up change in water volumes.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int extraidx = 5 * cudaindex;
//sum
float sum = -d_extra[extraidx];
for (unsigned char i = 0; i < 4; ++i) {
sum += d_extra[extraidx + 1 + i];
}
if (isBorder(c, r, DW, DH)) {
WATER[cudaindex] = 0;
SEDIMENT[cudaindex] = 0;
REGOLITH[cudaindex] = 0;
}
else {
float evaporate = DEVAPORATION/1000;
if (evaporate < 0) { evaporate = 0; }
WATER[cudaindex] += sum - evaporate;
if (WATER[cudaindex] < 0) { WATER[cudaindex] = 0; }
SEDIMENT[cudaindex] -= evaporate * 2;
if (SEDIMENT[cudaindex] < 0) { SEDIMENT[cudaindex] = 0; }
}
WATER_LAST[cudaindex] = (WATER[cudaindex] + WATER_LAST[cudaindex]) / 2;
}
instructionParam::myCudaParam_t params;
std::mutex m;
std::condition_variable cv;
unsigned int requested = 0;
bool active = true;
bool a_kinetic = true;
bool a_hydraulic = true;
volatile bool activityRequest = false;
void activity() {
//lock simMap
DWORD dwWaitResult;
dwWaitResult = WaitForSingleObject(
params.handle, // handle to mutex
100); // 100 ms time-out
if (dwWaitResult != WAIT_OBJECT_0) {
return;
}
instructionParam::passParams(params.simMap, params.idx, params.strength, params.radius, params.x, params.y, params.z, params.sprinklerStrength, params.sprinklerRadius, params.evaporation);
hipError_t err = hipGetLastError();
if (err != 0) {
printf("XXX PARAM Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
instructionParam::genSum(params.simMap, instructionParam::getIntPtr());
err = hipGetLastError();
if (err != 0) {
printf("XXX SUM Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
hipDeviceSynchronize();
float * d_working;
int worksize = 9 * params.simMap->getWidth() * params.simMap->getHeight();
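// Scratch layout as used by the kernels below: stages A/B use 2*W*H floats (pipe accelerations),
// stages C/D use 5 floats per cell (own outflow + four inflows), and the semi-Lagrangian pass
// reuses 2*W*H floats; 9*W*H appears to be a generous upper bound over all of these.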
float * h_working = new float[worksize];
for (int i = 0; i < worksize; ++i) {
h_working[i] = 0;
}
err = hipMalloc(&d_working, worksize * sizeof(float));
err = hipMemcpy(d_working, h_working, worksize * sizeof(float), hipMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 1 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//
const dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
if (params.simMap == NULL) {
printf("NULL\n\n");
}
const dim3 gridSize = dim3((params.simMap->getWidth() / BLOCK_SIZE_X) + 1, (params.simMap->getHeight() / BLOCK_SIZE_Y) + 1);
d_simWaterA << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
err = hipGetLastError();
if (err != 0) {
printf("XXX WATERA Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//CPU synchro
d_simWaterB << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
err = hipGetLastError();
if (err != 0) {
printf("XXX WATERB Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//cleanse memory, we are repurposing it from now on.
hipDeviceSynchronize();
err = hipMemcpy(d_working, h_working, worksize * sizeof(float), hipMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 2 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//CPU synchro
d_simWaterC << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//CPU synchro
d_simWaterD << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//
//Now that water has been updated compute erosion
//
//cleanse memory, we are repurposing it from now on.
hipDeviceSynchronize();
err = hipMemcpy(d_working, h_working, worksize * sizeof(float), hipMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 3 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//
if (a_kinetic) {
d_erodeWaterKinetic << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
}
//
if (a_hydraulic) {
d_erodeWaterHydraulic << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
}
//CPU synchro
d_erodeWaterSemiLag1 << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//CPU synchro
d_erodeWaterSemiLag2 << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//
if (params.callback != NULL) {
params.callback();
}
//free
err = hipFree(d_working);
if (err != 0) {
printf("XXX FREE Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
delete[] h_working; // allocated with new[] above
hipStreamSynchronize(0);
//unlock simMap
ReleaseMutex(params.handle);
err = hipGetLastError();
if (err != 0) {
printf("XXX Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
}
void worker_thread()
{
while (true) {
// Wait until main() sends data
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, [] {return activityRequest; });
if (!active) { lk.unlock(); return; }
// after the wait, we own the lock.
//std::cout << "Hydro worker thread is active.\n";
activity();
// Manual unlocking is done before notifying, to avoid waking up
// the waiting thread only to block again (see notify_one for details)
lk.unlock();
activityRequest = false;
}
}
void killThread() {
printf("Terminating Hydro Thread\n");
active = false;
cv.notify_one();
}
void initThread() {
printf("Initing Hydro Thread\n");
std::thread worker(worker_thread);
worker.detach();
}
void simWater(float x, float y, float z, bool kinetic, bool hydraulic, int idx, float toolStregth, float toolRadius, float dir, SimMap* simMap, HANDLE handle, void(*callback)(), float sprinklerStrength, float sprinklerRadius, float evaporation) {
params = { x,y,z,toolRadius, toolStregth*dir, idx, simMap,handle,callback, sprinklerStrength, sprinklerRadius, evaporation };
a_kinetic = kinetic;
a_hydraulic = hydraulic;
//ping thread
activityRequest = true;
cv.notify_one();
}
}
|
dea98a5e1afa65f5689f5fd893107210c670007b.cu
|
#include "hydro.cuh"
namespace hydro {
__device__
float d_gaussFalloff(float x, float y, float cx, float cy, float radius) {
float val = expf(-((((x - cx)*(x - cx) + (y - cy)*(y - cy)) * 4) / (radius*radius)));
return val < 0.01 ? 0 : val;
}
__device__
bool isOutside(int c, int r, int w, int h) {
return ((c < 0) || (r<0) || (c >= w) || (r >= h));
}
__device__
bool isBorder(int c, int r, int w, int h) {
return ((c == 0) || (r == 0) || (c == (w - 1) || (r == (h - 1))));
}
__global__
void d_erodeWaterHydraulic(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// Prerequisites: call genSlopes() and conduct a water flow update to have the latest WATER_LAST and SLOPE_SIN before execution.
// dissolves material into particles from the top.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//hydraulic erosion
float dissolutionCapacity = 0.001;
if (dissolutionCapacity > WATER[cudaindex]) {
dissolutionCapacity = WATER[cudaindex];
}
if (REGOLITH[cudaindex] > dissolutionCapacity) {
DUST[cudaindex] += REGOLITH[cudaindex] - dissolutionCapacity;
REGOLITH[cudaindex] = dissolutionCapacity;
}
else {
float remainingHydro = dissolutionCapacity - REGOLITH[cudaindex];
for (int i = DLEN - 1; i >= 0; --i) {
const float requestHydro = /*MDATA[MATERIAL[i]]MHYDRO */ remainingHydro;
if (requestHydro > dd_terrainData[i][cudaindex]) {
dd_terrainData[i][cudaindex] = 0;
remainingHydro -= requestHydro;
}
else {
dd_terrainData[i][cudaindex] -= requestHydro;
remainingHydro -= requestHydro;
break;
}
}
REGOLITH[cudaindex] += dissolutionCapacity - remainingHydro - REGOLITH[cudaindex];
}
}
__global__
void d_erodeWaterKinetic(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// Prerequisites: call genSlopes() and conduct a water flow update to have the latest WATER_LAST and SLOPE_SIN before execution.
// dissolves material into particles from the top.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//sediment transport capacity
//S = v * C * sin(alpha)
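//(v: magnitude of the per-cell flow computed below, sin(alpha): the precomputed SLOPE_SIN term, C: the 0.1 capacity constant)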
const float velocity = sqrt(WATER_CELL_VERT[cudaindex] * WATER_CELL_VERT[cudaindex] + WATER_CELL_HOR[cudaindex] * WATER_CELL_HOR[cudaindex]);
//clamp the slope term from below so flowing water still has some transport capacity on flat surfaces.
float transportCapacity = 0.1;
if (SLOPE_SIN[cudaindex] < 0.1) {
transportCapacity *= velocity * 0.1;
}
else {
transportCapacity *= velocity * SLOPE_SIN[cudaindex];
}
const float DISSOLVE = 1;
const float SETTLE = 1;
//compare material levels to capacity
if (SEDIMENT[cudaindex] > transportCapacity) {
//deposit
float delta = SETTLE*(SEDIMENT[cudaindex] - transportCapacity);
DUST[cudaindex] += delta;
SEDIMENT[cudaindex] -= delta;
}
else {
//erode
float delta = DISSOLVE*(transportCapacity - SEDIMENT[cudaindex]);
//start removing material from the terrain.
float remaining = delta - SEDIMENT[cudaindex];
for (int i = DLEN - 1; i >= 0; --i) {
const float request = MDATA[MATERIAL[i]]MHYDRO * remaining;
if(request > dd_terrainData[i][cudaindex]) {
dd_terrainData[i][cudaindex] = 0;
remaining -= request;
}
else {
dd_terrainData[i][cudaindex] -= request;
remaining -= request;
break;
}
}
//
SEDIMENT[cudaindex] += delta-remaining;
}
}
__global__
void d_erodeWaterSemiLag1(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
//move material
const float scale = 20;
const float oldx = c - (WATER_CELL_HOR[cudaindex] * scale);
const float oldy = r -(WATER_CELL_VERT[cudaindex] * scale);
const int foldx = floorf(oldx);
const int foldy = floorf(oldy);
//weight with which to pick each
float wx = oldx - foldx;
float wy = oldy - foldy;
float old = (oldy)*DW + (oldx);
const int idx[4] = { (foldy)*DW + (foldx), (foldy)*DW + (foldx+1), (foldy+1)*DW + (foldx), (foldy+1)*DW + (foldx+1) };
const bool valid[4] = { idx[0]>0 && idx[0]<DW*DH, idx[1]>0 && idx[1]<DW*DH, idx[2]>0 && idx[2]<DW*DH, idx[3]>0 && idx[3]<DW*DH };
//
// [foldx, foldy] wx > [foldx+1, foldy]
// wy V
// [foldx, foldy+1] [foldx+1, foldy+1]
//
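// Semi-Lagrangian gather: trace the flow back by one step (scale cells per unit of flow)
// and bilinearly interpolate sediment/regolith from the four surrounding cells,
// with wx/wy as the fractional offsets inside the source cell.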
float amountSediment = 0;
float amountRegolith = 0;
float div = 0; // must start at zero: counts the valid corner cells below
if (valid[0]) { amountSediment += SEDIMENT[idx[0]] * (1 - wy)*(1 - wx);
amountRegolith += REGOLITH[idx[0]] * (1 - wy)*(1 - wx); }
if (valid[1]) { amountSediment += SEDIMENT[idx[1]] * (1 - wy)*(wx);
amountRegolith += REGOLITH[idx[1]] * (1 - wy)*(wx); }
if (valid[2]) { amountSediment += SEDIMENT[idx[2]] * (wy)*(1 - wx);
amountRegolith += REGOLITH[idx[2]] * (wy)*(1 - wx); }
if (valid[3]) { amountSediment += SEDIMENT[idx[3]] * (wy)*(wx);
amountRegolith += REGOLITH[idx[3]] * (wy)*(wx); }
for (unsigned int i = 0; i < 4; ++i) {
if (valid[i]) {
++div;
}
}
//compensate for missing cells
if (div > 0) {
amountSediment *= 4 / div;
d_extra[cudaindex] = amountSediment;
d_extra[DW*DH+cudaindex] = amountRegolith;
}
//
// Continue after CPU synch
// ...
//
}
__global__
void d_erodeWaterSemiLag2(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* dd_materialIndex, float** dd_materialData) {
// just adds up the values computed temporarily
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
SEDIMENT[cudaindex] = d_extra[cudaindex];
REGOLITH[cudaindex] = d_extra[DW*DH + cudaindex];
}
__global__
void d_simWaterA(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 1 / 4 - compute pressure differences for each CELL. OUT: Pass accelerations to pipes
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int offset = (DW*DH);
//add amount from sprinklers
float val = 0;
for (unsigned int i = 0; i < DPOLY_SPR_LEN; ++i) {
val += DSPRINKLER_STR * d_gaussFalloff(c, r, POLY_SPR[i*3] * 2, POLY_SPR[i*3+2] * 2, DSPRINKLER_RADIUS);
}
WATER[cudaindex] += val;
//pipe indexes: WEST, NORTH, EAST, SOUTH
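//d_extra layout in this stage: [0, DW*DH) = horizontal pipe accelerations (stored at the pipe's west cell),
//[DW*DH, 2*DW*DH) = vertical pipe accelerations (stored at the pipe's north cell); d_simWaterB reads them back with the same offsets.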
const int idx[4] = { cudaindex - 1, cudaindex - DW, cudaindex+1, cudaindex + DW };
bool valid[4] = { idx[0] >= 0, idx[1] >= 0, idx[2] < DW*DH, idx[3] < DW*DH };
const int dir[4] = { -1, -1, 1, 1 };
const int pipeidx[4] = { cudaindex - 1, offset + cudaindex - DW, cudaindex, offset + cudaindex };
//bool pipevalid[4] = { pipeidx[0] >= 0 && pipeidx[0]%DW!=DW-1, pipeidx[1] - offset >= 0, pipeidx[2] < DW*DH, pipeidx[3] - offset < DW*(DH-1) };
float dif[4] = { 0,0,0,0 };
//compute
float myh = SUMS[cudaindex] + WATER[cudaindex] + SEDIMENT[cudaindex] + REGOLITH[cudaindex];
for (unsigned char i = 0; i < 4; ++i) {
if (valid[i]) {
dif[i] = myh - SUMS[idx[i]] - WATER[idx[i]] - SEDIMENT[idx[i]] - REGOLITH[idx[i]];
if (dif[i] < 0) { //do not write into higher slots. (only write from above)
dif[i] = 0;
valid[i] = false;
}
}
}
//store accelerations
for (unsigned char i = 0; i < 4; ++i) {
if (valid[i]) {
d_extra[pipeidx[i]] = (float)(dir[i] * dif[i]);
}
}
}
__global__
void d_simWaterB(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 2 / 4 - add accelerations to pipe values
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int offset = DW*DH;
//add accelerations
WATER_HOR[cudaindex] += d_extra[cudaindex];
WATER_VERT[cudaindex] += d_extra[offset + cudaindex];
//nullify if source has less water.
if (WATER_HOR[cudaindex] > 0) {
if (WATER[cudaindex] < WATER_HOR[cudaindex]) { WATER_HOR[cudaindex] = WATER[cudaindex]; }
}
else {
if (WATER[cudaindex+1] < -WATER_HOR[cudaindex]) { WATER_HOR[cudaindex] = -WATER[cudaindex + 1]; }
}
//
if (WATER_VERT[cudaindex] > 0) {
if (WATER[cudaindex] < WATER_VERT[cudaindex]) { WATER_VERT[cudaindex] = WATER[cudaindex]; }
}
else {
if (WATER[cudaindex + DW] < -WATER_VERT[cudaindex]) { WATER_VERT[cudaindex] = -WATER[cudaindex + DW]; }
}
}
__global__
void d_simWaterC(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 3 / 4 - compute transported amounts of water in each cell based on speed in the pipes.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int extraidx = 5 * cudaindex;
const int offset = (DW*DH);
//transfer speeds
const int pipeidx[4] = { cudaindex - 1, cudaindex - DW, cudaindex, cudaindex };
const char pipedir[4] = { -1, -1, 1, 1 };
const bool pipevalid[4] = { pipeidx[0] >= 0 && pipeidx[0] % DW != DW - 1, pipeidx[1] >= 0, pipeidx[2] < DW*DH, pipeidx[3] < DW*(DH - 1) };
const int idx[4] = { cudaindex - 1, cudaindex - DW, cudaindex + 1, cudaindex + DW };
//const bool valid[4] = { idx[0] >= 0, idx[1] >= 0, idx[2] < DW*DH, idx[3] < DW*DH };
float dif[4] = { 0,0,0,0 };
//see demands
float sum = 0;
if (pipevalid[0]) {
dif[0] = -WATER_HOR[pipeidx[0]];
}
if (pipevalid[1]) {
dif[1] = -WATER_VERT[pipeidx[1]];
}
if (pipevalid[2]) {
dif[2] = WATER_HOR[pipeidx[2]];
}
if (pipevalid[3]) {
dif[3] = WATER_VERT[pipeidx[3]];
}
for (unsigned char i = 0; i < 4; ++i) {
if (dif[i] < 0) {
dif[i] = 0;
}
else {
sum += dif[i];
}
}
if (sum == 0) { return; }
float amount = sum;
if (amount > WATER[cudaindex]) {
amount = WATER[cudaindex];
}
amount /= 2;
for (unsigned char i = 0; i < 4; ++i) {
if (pipevalid[i]) {
dif[i] = amount * (dif[i] / sum);
d_extra[idx[i] * 5 + 1 + i] = dif[i];
}
}
d_extra[extraidx] = amount;
//calculate overall flows for use in kinetic hydro-erosion
WATER_CELL_VERT[cudaindex] = dif[3] - dif[1];
WATER_CELL_HOR[cudaindex] = dif[2] - dif[0];
}
__global__
void d_simWaterD(int* dd_intParams, float* dd_floatParams, float** dd_terrainData, float * d_extra, int* d_materialIndex, float** d_materialData) {
// 4 / 4 - Add up change in water volumes.
//indexing
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if (isOutside(c, r, DW, DH)) { return; }
const int cudaindex = r*DW + c;
const int extraidx = 5 * cudaindex;
//sum
float sum = -d_extra[extraidx];
for (unsigned char i = 0; i < 4; ++i) {
sum += d_extra[extraidx + 1 + i];
}
if (isBorder(c, r, DW, DH)) {
WATER[cudaindex] = 0;
SEDIMENT[cudaindex] = 0;
REGOLITH[cudaindex] = 0;
}
else {
float evaporate = DEVAPORATION/1000;
if (evaporate < 0) { evaporate = 0; }
WATER[cudaindex] += sum - evaporate;
if (WATER[cudaindex] < 0) { WATER[cudaindex] = 0; }
SEDIMENT[cudaindex] -= evaporate * 2;
if (SEDIMENT[cudaindex] < 0) { SEDIMENT[cudaindex] = 0; }
}
WATER_LAST[cudaindex] = (WATER[cudaindex] + WATER_LAST[cudaindex]) / 2;
}
instructionParam::myCudaParam_t params;
std::mutex m;
std::condition_variable cv;
unsigned int requested = 0;
bool active = true;
bool a_kinetic = true;
bool a_hydraulic = true;
volatile bool activityRequest = false;
void activity() {
//lock simMap
DWORD dwWaitResult;
dwWaitResult = WaitForSingleObject(
params.handle, // handle to mutex
100); // 100 ms time-out
if (dwWaitResult != WAIT_OBJECT_0) {
return;
}
instructionParam::passParams(params.simMap, params.idx, params.strength, params.radius, params.x, params.y, params.z, params.sprinklerStrength, params.sprinklerRadius, params.evaporation);
cudaError_t err = cudaGetLastError();
if (err != 0) {
printf("XXX PARAM Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
instructionParam::genSum(params.simMap, instructionParam::getIntPtr());
err = cudaGetLastError();
if (err != 0) {
printf("XXX SUM Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
cudaDeviceSynchronize();
float * d_working;
int worksize = 9 * params.simMap->getWidth() * params.simMap->getHeight();
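// Scratch layout as used by the kernels below: stages A/B use 2*W*H floats (pipe accelerations),
// stages C/D use 5 floats per cell (own outflow + four inflows), and the semi-Lagrangian pass
// reuses 2*W*H floats; 9*W*H appears to be a generous upper bound over all of these.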
float * h_working = new float[worksize];
for (int i = 0; i < worksize; ++i) {
h_working[i] = 0;
}
err = cudaMalloc(&d_working, worksize * sizeof(float));
err = cudaMemcpy(d_working, h_working, worksize * sizeof(float), cudaMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 1 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//
const dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y);
if (params.simMap == NULL) {
printf("NULL\n\n");
}
const dim3 gridSize = dim3((params.simMap->getWidth() / BLOCK_SIZE_X) + 1, (params.simMap->getHeight() / BLOCK_SIZE_Y) + 1);
d_simWaterA << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
err = cudaGetLastError();
if (err != 0) {
printf("XXX WATERA Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//CPU synchro
d_simWaterB << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
err = cudaGetLastError();
if (err != 0) {
printf("XXX WATERB Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//cleanse memory, we are repurposing it from now on.
cudaDeviceSynchronize();
err = cudaMemcpy(d_working, h_working, worksize * sizeof(float), cudaMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 2 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//CPU synchro
d_simWaterC << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//CPU synchro
d_simWaterD << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//
//Now that water has been updated compute erosion
//
//cleanse memory, we are repurposing it from now on.
cudaDeviceSynchronize();
err = cudaMemcpy(d_working, h_working, worksize * sizeof(float), cudaMemcpyHostToDevice);
if (err != 0) {
printf("XXX COPY 3 Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
//
if (a_kinetic) {
d_erodeWaterKinetic << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
}
//
if (a_hydraulic) {
d_erodeWaterHydraulic << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
}
//CPU synchro
d_erodeWaterSemiLag1 << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//CPU synchro
d_erodeWaterSemiLag2 << <gridSize, blockSize >> > (
instructionParam::getIntPtr(), instructionParam::getFloatPtr(),
params.simMap->getDeviceLayerDataList(),
d_working,
params.simMap->getDeviceLayerMaterialIndexList(),
params.simMap->getDeviceMaterialDataList());
//
if (params.callback != NULL) {
params.callback();
}
//free
err = cudaFree(d_working);
if (err != 0) {
printf("XXX FREE Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
delete[] h_working; // allocated with new[] above
cudaStreamSynchronize(0);
//unlock simMap
ReleaseMutex(params.handle);
err = cudaGetLastError();
if (err != 0) {
printf("XXX Hydraulic CUDA encountered an error number %d! \n", err);
return;
exit(-1);
}
}
void worker_thread()
{
while (true) {
// Wait until main() sends data
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, [] {return activityRequest; });
if (!active) { lk.unlock(); return; }
// after the wait, we own the lock.
//std::cout << "Hydro worker thread is active.\n";
activity();
// Manual unlocking is done before notifying, to avoid waking up
// the waiting thread only to block again (see notify_one for details)
lk.unlock();
activityRequest = false;
}
}
void killThread() {
printf("Terminating Hydro Thread\n");
active = false;
cv.notify_one();
}
void initThread() {
printf("Initing Hydro Thread\n");
std::thread worker(worker_thread);
worker.detach();
}
void simWater(float x, float y, float z, bool kinetic, bool hydraulic, int idx, float toolStregth, float toolRadius, float dir, SimMap* simMap, HANDLE handle, void(*callback)(), float sprinklerStrength, float sprinklerRadius, float evaporation) {
params = { x,y,z,toolRadius, toolStregth*dir, idx, simMap,handle,callback, sprinklerStrength, sprinklerRadius, evaporation };
a_kinetic = kinetic;
a_hydraulic = hydraulic;
//ping thread
activityRequest = true;
cv.notify_one();
}
}
|
171a994f03a173402e25c7ff9be2f2edf70cd9f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdint>
#include <memory>
#include <random>
#include <functional>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cuda_helper.h"
const size_t BLOCK_SIZE = 128u;
// Block partitioning: each thread processes one contiguous chunk of the input.
// | _ _ (0) _ _ | _ _ (1) _ _ | ... | _ _ (nthread - 1) _ _ |
__global__
void kernel_histogram_naive_block(const char* input, int* result, const size_t len, const size_t n_bin)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
// size of each thread's contiguous chunk
int section = (len - 1) / (blockDim.x * gridDim.x) + 1;
int start = i * section;
for (int k = 0; k < section; k++)
{
if (start + k < len)
{
int c = input[start + k];
if (c >= 0 && c < n_bin)
{
atomicAdd(&result[c], 1);
}
}
}
}
// Interleaved partitioning: threads stride across the input by the total thread count.
// |(0)(1) ... (nthread - 1) |(0)(1) ... (nthread - 1) | ... |
__global__
void kernel_histogram_naive_interleaved(const char* input, int* result, const size_t len, const size_t n_bin)
{
const int section = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
atomicAdd(&result[c], 1);
}
}
}
// Privatized histogram: accumulate into a per-block shared-memory copy, then merge into the global result.
__global__
void kernel_histogram_privatized(const char* input, int* result, const size_t len, const size_t n_bin)
{
extern __shared__ int local_hist[];
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
local_hist[bid] = 0;
}
__syncthreads();
const int section = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
atomicAdd(&local_hist[c], 1);
}
}
__syncthreads();
// merge
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
atomicAdd(&result[bid], local_hist[bid]);
}
}
// Aggregated histogram: per-block shared memory plus run-length aggregation of consecutive identical bins.
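// Run-length aggregation pays off when consecutive inputs hit the same bin (e.g. the Bernoulli test data):
// one atomicAdd per run instead of one per element.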
__global__
void kernel_histogram_aggregated(const char* input, int* result, const size_t len, const size_t n_bin)
{
extern __shared__ int local_hist[];
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
local_hist[bid] = 0;
}
__syncthreads();
const int section = blockDim.x * gridDim.x;
int pre = -1;
int cur = 0;
int accumulator = 0;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
cur = c;
if (cur != pre)
{
if (pre != -1)
{
atomicAdd(&local_hist[pre], accumulator);
}
accumulator = 1;
pre = cur;
}
else
{
accumulator++;
}
}
}
if (pre != -1)
{
atomicAdd(&local_hist[pre], accumulator);
}
__syncthreads();
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
atomicAdd(&result[bid], local_hist[bid]);
}
}
std::unique_ptr<int[]> gpu_histogram(const std::unique_ptr<char[]>& input, const size_t len, const size_t n_bin,
void(*kernel)(const char*, int *, size_t, size_t))
{
std::unique_ptr<int[]> h_result(new int[n_bin]);
char *d_input = nullptr;
int *d_result = nullptr;
cc(hipMalloc(&d_input, sizeof(char) * len));
cc(hipMemcpy(d_input, input.get(), sizeof(char) * len, hipMemcpyHostToDevice));
cc(hipMalloc(&d_result, sizeof(int) * n_bin));
cc(hipMemset(d_result, 0, sizeof(int) * n_bin));
if (kernel != kernel_histogram_privatized && kernel != kernel_histogram_aggregated)
{
hipLaunchKernelGGL(( kernel), dim3((len - 1) / (128 * BLOCK_SIZE) + 1), dim3(BLOCK_SIZE), 0, 0, d_input, d_result, len, n_bin);
}
else
{
hipLaunchKernelGGL(( kernel), dim3((len - 1) / (128 * BLOCK_SIZE) + 1), dim3(BLOCK_SIZE), sizeof(int) * n_bin, 0, d_input, d_result, len, n_bin);
}
cc(hipDeviceSynchronize());
cc(hipMemcpy(h_result.get(), d_result, sizeof(int) * n_bin, hipMemcpyDeviceToHost));
hipFree(d_input);
hipFree(d_result);
return h_result;
}
std::unique_ptr<int[]> cpu_histogram(const std::unique_ptr<char[]>& input, const size_t len, const size_t n_bin)
{
std::unique_ptr<int[]> result(new int[n_bin]);
std::fill(result.get(), result.get() + n_bin, 0);
for (int i = 0; i < len; i++)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
result[c]++;
}
}
return result;
}
bool valid(const std::unique_ptr<int[]>& h_result, const std::unique_ptr<int[]>& d_result, const size_t len)
{
bool is_valid = true;
for (auto i = 0; i < len; i++)
{
auto delta = h_result[i] - d_result[i];
if (delta != 0)
{
is_valid = false;
printf("At [%d]: %d vs %d\n", i, h_result[i], d_result[i]);
}
}
if (is_valid)
{
printf("All OK\n");
}
else
{
printf("Somewhere error\n");
}
return is_valid;
}
enum class DataDistribution
{
UNIFORM,
BERNOULLI
};
void test(const size_t len, const size_t n_bin, DataDistribution type = DataDistribution::UNIFORM)
{
std::unique_ptr<char[]> input(new char[len]);
if (type == DataDistribution::UNIFORM)
{
std::default_random_engine rd;
std::uniform_int_distribution<uint32_t> dis(0, 127);
#pragma omp parallel for
for (auto i = 0; i < len; i++)
{
input[i] = dis(rd);
}
printf("Uniform: \n");
}
else
{
std::default_random_engine rd;
std::bernoulli_distribution dis(0.5);
#pragma omp parallel for
for (auto i = 0; i < len; i++)
{
input[i] = dis(rd) ? 1 : 0;
}
printf("Bernoulli: \n");
}
TimeCounter<> tcpu;
auto h_result = cpu_histogram(input, len, n_bin);
tcpu.output("CPU: ");
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_naive_block);
tgpu.output("GPU Naive Block: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_naive_interleaved);
tgpu.output("GPU Naive Interleaved: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_privatized);
tgpu.output("GPU Privatized: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_aggregated);
tgpu.output("GPU Aggregated: ");
valid(h_result, d_result, n_bin);
}
printf("\n\n");
}
int main()
{
const size_t len = 600'000'000;
const size_t n_bin = 128u;
printf("Length: %zu Bins: %zu\n", len, n_bin);
test(len, n_bin, DataDistribution::UNIFORM);
test(len, n_bin, DataDistribution::BERNOULLI);
return 0;
}
|
171a994f03a173402e25c7ff9be2f2edf70cd9f6.cu
|
#include <iostream>
#include <cstdint>
#include <memory>
#include <random>
#include <functional>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cuda_helper.h"
const size_t BLOCK_SIZE = 128u;
// Block partitioning: each thread processes one contiguous chunk of the input
// | _ _ (0) _ _ | _ _ (1) _ _ | ... | _ _ (nthread - 1) _ _ |
__global__
void kernel_histogram_naive_block(const char* input, int* result, const size_t len, const size_t n_bin)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
// size of each thread's contiguous chunk
int section = (len - 1) / (blockDim.x * gridDim.x) + 1;
int start = i * section;
for (int k = 0; k < section; k++)
{
if (start + k < len)
{
int c = input[start + k];
if (c >= 0 && c < n_bin)
{
atomicAdd(&result[c], 1);
}
}
}
}
// Interleaved partitioning: threads stride across the input by the total thread count
// |(0)(1) ... (nthread - 1) |(0)(1) ... (nthread - 1) | ... |
__global__
void kernel_histogram_naive_interleaved(const char* input, int* result, const size_t len, const size_t n_bin)
{
const int section = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
atomicAdd(&result[c], 1);
}
}
}
// Privatized version: accumulate into a per-block shared-memory histogram, then merge into the global result
__global__
void kernel_histogram_privatized(const char* input, int* result, const size_t len, const size_t n_bin)
{
extern __shared__ int local_hist[];
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
local_hist[bid] = 0;
}
__syncthreads();
const int section = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
atomicAdd(&local_hist[c], 1);
}
}
__syncthreads();
// merge
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
atomicAdd(&result[bid], local_hist[bid]);
}
}
// Aggregated version: per-block shared memory plus run-length aggregation of consecutive identical bins
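// Run-length aggregation pays off when consecutive inputs hit the same bin (e.g. the Bernoulli test data):
// one atomicAdd per run instead of one per element.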
__global__
void kernel_histogram_aggregated(const char* input, int* result, const size_t len, const size_t n_bin)
{
extern __shared__ int local_hist[];
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
local_hist[bid] = 0;
}
__syncthreads();
const int section = blockDim.x * gridDim.x;
int pre = -1;
int cur = 0;
int accumulator = 0;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += section)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
cur = c;
if (cur != pre)
{
if (pre != -1)
{
atomicAdd(&local_hist[pre], accumulator);
}
accumulator = 1;
pre = cur;
}
else
{
accumulator++;
}
}
}
if (pre != -1)
{
atomicAdd(&local_hist[pre], accumulator);
}
__syncthreads();
for (int bid = threadIdx.x; bid < n_bin; bid += blockDim.x)
{
atomicAdd(&result[bid], local_hist[bid]);
}
}
std::unique_ptr<int[]> gpu_histogram(const std::unique_ptr<char[]>& input, const size_t len, const size_t n_bin,
void(*kernel)(const char*, int *, size_t, size_t))
{
std::unique_ptr<int[]> h_result(new int[n_bin]);
char *d_input = nullptr;
int *d_result = nullptr;
cc(cudaMalloc(&d_input, sizeof(char) * len));
cc(cudaMemcpy(d_input, input.get(), sizeof(char) * len, cudaMemcpyHostToDevice));
cc(cudaMalloc(&d_result, sizeof(int) * n_bin));
cc(cudaMemset(d_result, 0, sizeof(int) * n_bin));
if (kernel != kernel_histogram_privatized && kernel != kernel_histogram_aggregated)
{
kernel<<<(len - 1) / (128 * BLOCK_SIZE) + 1, BLOCK_SIZE>>>(d_input, d_result, len, n_bin);
}
else
{
kernel<<<(len - 1) / (128 * BLOCK_SIZE) + 1, BLOCK_SIZE, sizeof(int) * n_bin>>>(d_input, d_result, len, n_bin);
}
cc(cudaDeviceSynchronize());
cc(cudaMemcpy(h_result.get(), d_result, sizeof(int) * n_bin, cudaMemcpyDeviceToHost));
cudaFree(d_input);
cudaFree(d_result);
return h_result;
}
std::unique_ptr<int[]> cpu_histogram(const std::unique_ptr<char[]>& input, const size_t len, const size_t n_bin)
{
std::unique_ptr<int[]> result(new int[n_bin]);
std::fill(result.get(), result.get() + n_bin, 0);
for (int i = 0; i < len; i++)
{
int c = input[i];
if (c >= 0 && c < n_bin)
{
result[c]++;
}
}
return result;
}
bool valid(const std::unique_ptr<int[]>& h_result, const std::unique_ptr<int[]>& d_result, const size_t len)
{
bool is_valid = true;
for (auto i = 0; i < len; i++)
{
auto delta = h_result[i] - d_result[i];
if (delta != 0)
{
is_valid = false;
printf("At [%d]: %d vs %d\n", i, h_result[i], d_result[i]);
}
}
if (is_valid)
{
printf("All OK\n");
}
else
{
printf("Somewhere error\n");
}
return is_valid;
}
enum class DataDistribution
{
UNIFORM,
BERNOULLI
};
void test(const size_t len, const size_t n_bin, DataDistribution type = DataDistribution::UNIFORM)
{
std::unique_ptr<char[]> input(new char[len]);
if (type == DataDistribution::UNIFORM)
{
std::default_random_engine rd;
std::uniform_int_distribution<uint32_t> dis(0, 127);
#pragma omp parallel for
for (auto i = 0; i < len; i++)
{
input[i] = dis(rd);
}
printf("Uniform: \n");
}
else
{
std::default_random_engine rd;
std::bernoulli_distribution dis(0.5);
#pragma omp parallel for
for (auto i = 0; i < len; i++)
{
input[i] = dis(rd) ? 1 : 0;
}
printf("Bernoulli: \n");
}
TimeCounter<> tcpu;
auto h_result = cpu_histogram(input, len, n_bin);
tcpu.output("CPU: ");
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_naive_block);
tgpu.output("GPU Naive Block: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_naive_interleaved);
tgpu.output("GPU Naive Interleaved: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_privatized);
tgpu.output("GPU Privatized: ");
valid(h_result, d_result, n_bin);
}
{
TimeCounter<> tgpu;
auto d_result = gpu_histogram(input, len, n_bin, kernel_histogram_aggregated);
tgpu.output("GPU Aggregated: ");
valid(h_result, d_result, n_bin);
}
printf("\n\n");
}
int main()
{
const size_t len = 600'000'000;
const size_t n_bin = 128u;
printf("Length: %zu Bins: %zu\n", len, n_bin);
test(len, n_bin, DataDistribution::UNIFORM);
test(len, n_bin, DataDistribution::BERNOULLI);
return 0;
}
|
31981455a754467455671f0cb5fe3140f4feb517.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "readppm.c"
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
__global__ void filter(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
if (j < n && i < m)
{
out[(i*n+j)*3+0] = image[(i*n+j)*3+0];
out[(i*n+j)*3+1] = image[(i*n+j)*3+1];
out[(i*n+j)*3+2] = image[(i*n+j)*3+2];
}
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel
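// 5x5 box blur: average the 25 neighbours of (i, j); the two outermost rows/columns keep the plain copy made above.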
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
sumx += image[((i+k)*n+(j+l))*3+0];
sumy += image[((i+k)*n+(j+l))*3+1];
sumz += image[((i+k)*n+(j+l))*3+2];
}
out[(i*n+j)*3+0] = sumx/25;
out[(i*n+j)*3+1] = sumy/25;
out[(i*n+j)*3+2] = sumz/25;
}
}
// Compute CUDA kernel and display image
void Draw()
{
unsigned char *image, *out;
int n, m;
unsigned char *dev_image, *dev_out;
image = readppm("maskros512.ppm", &n, &m);
out = (unsigned char*) malloc(n*m*3);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMalloc( (void**)&dev_image, n*m*3);
hipMalloc( (void**)&dev_out, n*m*3);
hipMemcpy( dev_image, image, n*m*3, hipMemcpyHostToDevice);
dim3 dimBlock( 16, 16 );
dim3 dimGrid( 32, 32 );
hipLaunchKernelGGL(( filter), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_image, dev_out, n, m);
hipDeviceSynchronize();
hipMemcpy( out, dev_out, n*m*3, hipMemcpyDeviceToHost );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float theTime;
hipEventElapsedTime(&theTime, start, stop);
printf("Things took %f ms\n", theTime);
hipFree(dev_image);
hipFree(dev_out);
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glRasterPos2f(-1, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, out );
glFlush();
// Free per-frame resources; Draw() runs on every redisplay.
hipEventDestroy(start);
hipEventDestroy(stop);
free(image);
free(out);
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
glutInitWindowSize( 1024, 512 );
glutCreateWindow("CUDA on live GL");
glutDisplayFunc(Draw);
glutMainLoop();
}
|
31981455a754467455671f0cb5fe3140f4feb517.cu
|
#include <stdio.h>
#include "readppm.c"
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
__global__ void filter(unsigned char *image, unsigned char *out, int n, int m)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int sumx, sumy, sumz, k, l;
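// One thread per pixel: pass the pixel through unchanged, then overwrite interior pixels
// with a 5x5 box average of each RGB channel.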
// printf is OK under --device-emulation
// printf("%d %d %d %d\n", i, j, n, m);
if (j < n && i < m)
{
out[(i*n+j)*3+0] = image[(i*n+j)*3+0];
out[(i*n+j)*3+1] = image[(i*n+j)*3+1];
out[(i*n+j)*3+2] = image[(i*n+j)*3+2];
}
if (i > 1 && i < m-2 && j > 1 && j < n-2)
{
// Filter kernel
sumx=0;sumy=0;sumz=0;
for(k=-2;k<3;k++)
for(l=-2;l<3;l++)
{
sumx += image[((i+k)*n+(j+l))*3+0];
sumy += image[((i+k)*n+(j+l))*3+1];
sumz += image[((i+k)*n+(j+l))*3+2];
}
out[(i*n+j)*3+0] = sumx/25;
out[(i*n+j)*3+1] = sumy/25;
out[(i*n+j)*3+2] = sumz/25;
}
}
// Compute CUDA kernel and display image
void Draw()
{
unsigned char *image, *out;
int n, m;
unsigned char *dev_image, *dev_out;
image = readppm("maskros512.ppm", &n, &m);
out = (unsigned char*) malloc(n*m*3);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc( (void**)&dev_image, n*m*3);
cudaMalloc( (void**)&dev_out, n*m*3);
cudaMemcpy( dev_image, image, n*m*3, cudaMemcpyHostToDevice);
dim3 dimBlock( 16, 16 );
dim3 dimGrid( 32, 32 );
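// Launch geometry is hard-coded to 32x32 blocks of 16x16 threads, covering at most 512x512 pixels (sized for maskros512.ppm).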
filter<<<dimGrid, dimBlock>>>(dev_image, dev_out, n, m);
cudaDeviceSynchronize();
cudaMemcpy( out, dev_out, n*m*3, cudaMemcpyDeviceToHost );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float theTime;
cudaEventElapsedTime(&theTime, start, stop);
printf("Things took %f ms\n", theTime);
cudaFree(dev_image);
cudaFree(dev_out);
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glRasterPos2f(-1, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( n, m, GL_RGB, GL_UNSIGNED_BYTE, out );
glFlush();
// Free per-frame resources; Draw() runs on every redisplay.
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(image);
free(out);
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
glutInitWindowSize( 1024, 512 );
glutCreateWindow("CUDA on live GL");
glutDisplayFunc(Draw);
glutMainLoop();
}
|
3f97759e19b0fa6aa7336c26e3b425bc04cfc9e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void initializeBiasKernel_tanh(float* b, int size){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size){
b[index] = 0.0;
}
}
|
3f97759e19b0fa6aa7336c26e3b425bc04cfc9e7.cu
|
#include "includes.h"
__global__ void initializeBiasKernel_tanh(float* b, int size){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size){
b[index] = 0.0;
}
}
|
e4048ad1e960cf936e66d1d6881274205456e227.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
//extern THCState *state; // comment out this line
THCState *state = at::globalContext().lazyInitCUDA(); // add this line
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
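// Fill per-sample pointer arrays so the batched GEMM calls below can process the whole mini-batch at once.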
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
|
e4048ad1e960cf936e66d1d6881274205456e227.cu
|
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
//extern THCState *state; // comment out this line
THCState *state = at::globalContext().lazyInitCUDA(); // add this line
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
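// Fill per-sample pointer arrays so the batched GEMM calls below can process the whole mini-batch at once.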
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
|
67fb4a921dc448ff170226900378bf338ce83d19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <hip/hip_runtime.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N * N) return; // guard: the grid is rounded up to a whole number of blocks
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
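// Input format: the first line gives N, followed by N rows of matrix A and then N rows of matrix B.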
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
hipMalloc(&dA, sizeof(int) * N * N);
hipMalloc(&dB, sizeof(int) * N * N);
hipMalloc(&dC, sizeof(int) * N * N);
hipMemcpy(dA, A, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dB, B, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dC, C, sizeof(int) * N * N, hipMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim((N * N + blockDim.x - 1) / blockDim.x, 1, 1);
hipLaunchKernelGGL(( multiply), dim3(gridDim), dim3(blockDim), 0, 0, dA, dB, dC, N);
// Copy only the result matrix back to the host; A and B are inputs and need no copy-back.
hipMemcpy(C, dC, sizeof(int) * N * N, hipMemcpyDeviceToHost);
return 0;
}
|
67fb4a921dc448ff170226900378bf338ce83d19.cu
|
#include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <cuda.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N * N) return; // guard: the grid is rounded up to a whole number of blocks
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
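// Input format: the first line gives N, followed by N rows of matrix A and then N rows of matrix B.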
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
cudaMalloc(&dA, sizeof(int) * N * N);
cudaMalloc(&dB, sizeof(int) * N * N);
cudaMalloc(&dC, sizeof(int) * N * N);
cudaMemcpy(dA, A, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dC, C, sizeof(int) * N * N, cudaMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim((N * N + blockDim.x - 1) / blockDim.x, 1, 1);
multiply<<<gridDim, blockDim>>>(dA, dB, dC, N);
// Copy only the result matrix back to the host; A and B are inputs and need no copy-back.
cudaMemcpy(C, dC, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
return 0;
}
|
c2f0f460ab35dad9f3bf4e03dc223f6cb4eff1e2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include "csv.hpp"
#include "timer.h"
using namespace std;
struct is_eq_zero {
__host__ __device__ bool operator()(long a) const {
return (a==0);
}
};
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main(int argc, char **argv)
{
if (argc < 5) {
printf("Usage: ./2 file_name nLines span IP_address \n"); return 0;
}
int N = atoi(argv[2]);
int INTVL = atoi(argv[3]);
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
hipSetDevice(dev);
unsigned long long start_time = 20190114000000000;
char del = '.';
// std::string stringIP;
std::string IPstring;
std::string stringIP = argv[4];
for (const auto subStr : split_string_2(stringIP, del)) {
unsigned long ipaddr_src;
ipaddr_src = atoi(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
IPstring = IPstring + trans_string;
}
unsigned long long s = bitset<32>(IPstring).to_ullong();
std::cout << "match:" << stringIP << "," << IPstring << "," << s << std::endl;
thrust::host_vector<unsigned long long> h_timestamp(N);
thrust::host_vector<long> h_sourceIP(N);
thrust::host_vector<long> h_IP_to_match(N);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
std::cout << "read ERROR" << std::endl;
return 1;
}
/* hard coded */
for (int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
h_timestamp[row] = stoll(rec[0]);
h_sourceIP[row] = stol(rec[1]);
// h_IP_to_match[row] = 2639437048;
h_IP_to_match[row] = s;
}
// thrust::host_vector<long long> h_timestamp(N);
thrust::host_vector<unsigned long long> h_out(N);
thrust::host_vector<unsigned long long> h_out_2(N);
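// Quantize each timestamp into INTVL-wide windows measured from start_time; h_out_2 holds the start of each record's window.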
for(int i=0; i < N; i++)
{
h_out[i] = (h_timestamp[i] - start_time) / INTVL;
}
for(int i=0; i < N; i++)
{
h_out_2[i] = (h_out[i] * INTVL) + start_time;
}
for(int i=0; i < 5; i++)
std::cout << h_timestamp[i] << "," << h_out_2[i] << std::endl;
thrust::device_vector<long long> d_timestamp(N);
thrust::device_vector<long long> d_out(N);
thrust::device_vector<long long> d_out_2(N);
thrust::copy(h_timestamp.begin(), h_timestamp.end(), d_timestamp.begin());
thrust::copy(h_out.begin(), h_out.end(), d_out.begin());
thrust::copy(h_out_2.begin(), h_out_2.end(), d_out_2.begin());
thrust::device_vector<long> d_IP_to_match(N);
thrust::device_vector<long> d_sourceIP(N);
thrust::copy(h_IP_to_match.begin(), h_IP_to_match.end(), d_IP_to_match.begin());
thrust::copy(h_sourceIP.begin(), h_sourceIP.end(), d_sourceIP.begin());
thrust::device_vector<unsigned long> dev_c(N);
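// dev_c = sourceIP - IP_to_match element-wise; a zero entry marks a record whose source address equals the target.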
thrust::transform(begin(d_sourceIP), end(d_sourceIP), // dev_a for input
begin(d_IP_to_match), // dev_b for input
begin(dev_c), // dev_c for output
[] __device__ (long x, long y) -> unsigned long { return x - y; });
thrust::device_vector<long long> dev_c_2(N);
thrust::copy(dev_c.begin(), dev_c.end(), dev_c_2.begin());
thrust::sort_by_key(dev_c.begin(), dev_c.end(), d_sourceIP.begin());
thrust::sort_by_key(dev_c_2.begin(), dev_c_2.end(), d_out_2.begin());
for(int i=0; i < 10; i++)
std::cout << dev_c[i] << "," << d_sourceIP[i] << "," << d_out_2[i] << std::endl;
int N_count = thrust::count_if(dev_c.begin(), dev_c.end(), is_eq_zero());
thrust::device_vector<long> d_b(N_count);
thrust::copy_if(dev_c.begin(), dev_c.end(), d_b.begin(), is_eq_zero());
std::cout << d_b.size() << std::endl;
std::cout << endl;
cout << "writing file..." << endl;
std::remove("tmp");
ofstream outputfile("tmp");
bitset<32> bs(d_sourceIP[0]);
string bs1 = bs.to_string().substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = bs.to_string().substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = bs.to_string().substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = bs.to_string().substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string sourceIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
for(int i=0; i < d_b.size(); i++)
outputfile << sourceIP << "," << d_out_2[i] << std::endl;
outputfile.close();
return EXIT_SUCCESS;
}
|
c2f0f460ab35dad9f3bf4e03dc223f6cb4eff1e2.cu
|
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include "csv.hpp"
#include "timer.h"
using namespace std;
struct is_eq_zero {
__host__ __device__ bool operator()(long a) const {
return (a==0);
}
};
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main(int argc, char **argv)
{
if (argc < 5) {
printf("Usage: ./2 file_name nLines span IP_address \n"); return 0;
}
int N = atoi(argv[2]);
int INTVL = atoi(argv[3]);
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
cudaSetDevice(dev);
unsigned long long start_time = 20190114000000000;
char del = '.';
// std::string stringIP;
std::string IPstring;
std::string stringIP = argv[4];
for (const auto subStr : split_string_2(stringIP, del)) {
unsigned long ipaddr_src;
ipaddr_src = atoi(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
IPstring = IPstring + trans_string;
}
unsigned long long s = bitset<32>(IPstring).to_ullong();
std::cout << "match:" << stringIP << "," << IPstring << "," << s << std::endl;
thrust::host_vector<unsigned long long> h_timestamp(N);
thrust::host_vector<long> h_sourceIP(N);
thrust::host_vector<long> h_IP_to_match(N);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
std::cout << "read ERROR" << std::endl;
return 1;
}
/* hard coded */
for (int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
h_timestamp[row] = stoll(rec[0]);
h_sourceIP[row] = stol(rec[1]);
// h_IP_to_match[row] = 2639437048;
h_IP_to_match[row] = s;
}
// thrust::host_vector<long long> h_timestamp(N);
thrust::host_vector<unsigned long long> h_out(N);
thrust::host_vector<unsigned long long> h_out_2(N);
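// Quantize each timestamp into INTVL-wide windows measured from start_time; h_out_2 holds the start of each record's window.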
for(int i=0; i < N; i++)
{
h_out[i] = (h_timestamp[i] - start_time) / INTVL;
}
for(int i=0; i < N; i++)
{
h_out_2[i] = (h_out[i] * INTVL) + start_time;
}
for(int i=0; i < 5; i++)
std::cout << h_timestamp[i] << "," << h_out_2[i] << std::endl;
thrust::device_vector<long long> d_timestamp(N);
thrust::device_vector<long long> d_out(N);
thrust::device_vector<long long> d_out_2(N);
thrust::copy(h_timestamp.begin(), h_timestamp.end(), d_timestamp.begin());
thrust::copy(h_out.begin(), h_out.end(), d_out.begin());
thrust::copy(h_out_2.begin(), h_out_2.end(), d_out_2.begin());
thrust::device_vector<long> d_IP_to_match(N);
thrust::device_vector<long> d_sourceIP(N);
thrust::copy(h_IP_to_match.begin(), h_IP_to_match.end(), d_IP_to_match.begin());
thrust::copy(h_sourceIP.begin(), h_sourceIP.end(), d_sourceIP.begin());
thrust::device_vector<unsigned long> dev_c(N);
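// dev_c = sourceIP - IP_to_match element-wise; a zero entry marks a record whose source address equals the target.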
thrust::transform(begin(d_sourceIP), end(d_sourceIP), // dev_a for input
begin(d_IP_to_match), // dev_b for input
begin(dev_c), // dev_c for output
[] __device__ (long x, long y) -> unsigned long { return x - y; });
thrust::device_vector<long long> dev_c_2(N);
thrust::copy(dev_c.begin(), dev_c.end(), dev_c_2.begin());
thrust::sort_by_key(dev_c.begin(), dev_c.end(), d_sourceIP.begin());
thrust::sort_by_key(dev_c_2.begin(), dev_c_2.end(), d_out_2.begin());
for(int i=0; i < 10; i++)
std::cout << dev_c[i] << "," << d_sourceIP[i] << "," << d_out_2[i] << std::endl;
int N_count = thrust::count_if(dev_c.begin(), dev_c.end(), is_eq_zero());
thrust::device_vector<long> d_b(N_count);
thrust::copy_if(dev_c.begin(), dev_c.end(), d_b.begin(), is_eq_zero());
std::cout << d_b.size() << std::endl;
std::cout << endl;
cout << "writing file..." << endl;
std::remove("tmp");
ofstream outputfile("tmp");
bitset<32> bs(d_sourceIP[0]);
string bs1 = bs.to_string().substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = bs.to_string().substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = bs.to_string().substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = bs.to_string().substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string sourceIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
for(int i=0; i < d_b.size(); i++)
outputfile << sourceIP << "," << d_out_2[i] << std::endl;
outputfile.close();
return EXIT_SUCCESS;
}
|
97fd4f3b11a347d06bb090c13b6f2cfca90e8fd7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
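// Temporal blocking: the loop below advances 9 timesteps per launch of kernel0_9;
// any remaining timesteps are handled afterwards by the smaller kernel0_1..kernel0_5 variants.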
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
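// Reference path (scop == false): a plain OpenMP-parallel 25-point box stencil
// of radius BENCH_RAD over the interior, double-buffered between A[t%2] and
// A[(t+1)%2].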
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
97fd4f3b11a347d06bb090c13b6f2cfca90e8fd7.cu
|
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-9-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
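// Iteration-space bookkeeping for the AN5D-generated launcher: c0 indexes the
// temporal tile, c1/c2 the spatial dimensions (interior of size dimsize - 4),
// and the halo of 2 per spatial dimension matches BENCH_RAD.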
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
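// Main temporally blocked sweep: each kernel0_9 launch advances the stencil by
// __side0Len = 9 time steps on 512 x 476 spatial tiles (plus halo overlap);
// __side0LenMax records that depth so leftover time steps can be dispatched to
// the smaller kernel0_1..kernel0_8 variants below.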
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
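// Leftover time steps after the full-depth sweeps are dispatched below: either
// split across two smaller launches (when the parity test succeeds) or handled
// by a single kernel0_N launch of matching depth.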
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
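// Reference path (scop == false): a plain OpenMP-parallel 25-point box stencil
// of radius BENCH_RAD over the interior, double-buffered between A[t%2] and
// A[(t+1)%2].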
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
24d9aeeb520462eb28ff09d2ec8e56e430d052f0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/log_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
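// Forward pass: top = base_scale_ * log(input_scale_ * bottom + input_shift_),
// skipping the scale/shift steps when they are the identity.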
template<typename Dtype>
void LogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int_tp count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
caffe_gpu_log(count, bottom_data, top_data);
} else {
caffe_copy(count, bottom_data, top_data);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, top_data);
}
caffe_gpu_log(count, top_data, top_data);
}
if (base_scale_ != Dtype(1)) {
caffe_gpu_scal(count, base_scale_, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
input_scale_, (cl_mem) top_data, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) top_data, 0);
}
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (base_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, base_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
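// Backward pass: bottom_diff = top_diff * backward_num_scale_
//                              / (input_scale_ * bottom + input_shift_).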
template<typename Dtype>
void LogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int_tp count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_copy(count, bottom_data, bottom_diff);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, bottom_diff);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, bottom_diff);
}
caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff);
if (backward_num_scale_ != Dtype(1)) {
caffe_gpu_scal(count, backward_num_scale_, bottom_diff);
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0, &ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, input_scale_,
(cl_mem) bottom_diff, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) bottom_diff, 0, Dtype(-1),
(cl_mem) bottom_diff, 0);
if (backward_num_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
backward_num_scale_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LogLayer);
} // namespace caffe
|
24d9aeeb520462eb28ff09d2ec8e56e430d052f0.cu
|
#include <vector>
#include "caffe/layers/log_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
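// Forward pass: top = base_scale_ * log(input_scale_ * bottom + input_shift_),
// skipping the scale/shift steps when they are the identity.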
template<typename Dtype>
void LogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int_tp count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
caffe_gpu_log(count, bottom_data, top_data);
} else {
caffe_copy(count, bottom_data, top_data);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, top_data);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, top_data);
}
caffe_gpu_log(count, top_data, top_data);
}
if (base_scale_ != Dtype(1)) {
caffe_gpu_scal(count, base_scale_, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
input_scale_, (cl_mem) top_data, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) top_data, 0);
}
greentea_gpu_log<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (base_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, base_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
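// Backward pass: bottom_diff = top_diff * backward_num_scale_
//                              / (input_scale_ * bottom + input_shift_).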
template<typename Dtype>
void LogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const int_tp count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_copy(count, bottom_data, bottom_diff);
if (input_scale_ != Dtype(1)) {
caffe_gpu_scal(count, input_scale_, bottom_diff);
}
if (input_shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, input_shift_, bottom_diff);
}
caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff);
if (backward_num_scale_ != Dtype(1)) {
caffe_gpu_scal(count, backward_num_scale_, bottom_diff);
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff,
0, &ctx);
if (input_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, input_scale_,
(cl_mem) bottom_diff, 0);
}
if (input_shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count,
input_shift_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) bottom_diff, 0, Dtype(-1),
(cl_mem) bottom_diff, 0);
if (backward_num_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count,
backward_num_scale_, (cl_mem) bottom_diff, 0);
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LogLayer);
} // namespace caffe
|
e3fbe4bec6b71396a6d2a55a7f5002c183895890.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/ssd/box_encoder.cuh"
#include <hip/hip_runtime.h>
#include <vector>
#include <utility>
namespace dali {
__host__ __device__ inline float4 ToCenterWidthHeight(const float4 &box) {
return {
0.5f * (box.x + box.z),
0.5f * (box.y + box.w),
box.z - box.x,
box.w - box.y};
}
void BoxEncoder<GPUBackend>::PrepareAnchors(const vector<float> &anchors) {
DALI_ENFORCE(
(anchors.size() % BoundingBox::size) == 0,
"Anchors size must be divisible by 4, actual value = " + std::to_string(anchors.size()));
anchors_count_ = anchors.size() / BoundingBox::size;
anchors_.Resize({anchors_count_, static_cast<int64_t>(BoundingBox::size)});
anchors_as_center_wh_.Resize({anchors_count_, static_cast<int64_t>(BoundingBox::size)});
auto anchors_data_cpu = reinterpret_cast<const float4 *>(anchors.data());
vector<float4> anchors_as_center_wh(anchors_count_);
for (unsigned int anchor = 0; anchor < anchors_count_; ++anchor)
anchors_as_center_wh[anchor] = ToCenterWidthHeight(anchors_data_cpu[anchor]);
auto anchors_data = anchors_.mutable_data<float>();
auto anchors_as_center_wh_data = anchors_as_center_wh_.mutable_data<float>();
MemCopy(anchors_data, anchors.data(), anchors_count_ * BoundingBox::size * sizeof(float));
MemCopy(
anchors_as_center_wh_data,
anchors_as_center_wh.data(),
anchors_count_ * BoundingBox::size * sizeof(float));
}
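// Intersection-over-union of two corner-format boxes (x1, y1, x2, y2); the
// volatile intermediates keep the compiler from reordering or contracting
// these operations (see the remark in Encode).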
__device__ __forceinline__ float CalculateIou(const float4 &b1, const float4 &b2) {
float l = cuda_max(b1.x, b2.x);
float t = cuda_max(b1.y, b2.y);
float r = cuda_min(b1.z, b2.z);
float b = cuda_min(b1.w, b2.w);
float first = cuda_max(r - l, 0.0f);
float second = cuda_max(b - t, 0.0f);
volatile float intersection = first * second;
volatile float area1 = (b1.w - b1.y) * (b1.z - b1.x);
volatile float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
__device__ inline void FindBestMatch(const int N, volatile float *vals, volatile int *idx) {
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
if (vals[threadIdx.x] <= vals[threadIdx.x + stride]) {
if (vals[threadIdx.x] == vals[threadIdx.x + stride]) {
idx[threadIdx.x] = cuda_max(idx[threadIdx.x], idx[threadIdx.x + stride]);
} else {
vals[threadIdx.x] = vals[threadIdx.x + stride];
idx[threadIdx.x] = idx[threadIdx.x + stride];
}
}
}
__syncthreads();
}
}
__device__ float4 MatchOffsets(
float4 box, float4 anchor, const float *means, const float *stds, float scale) {
box.x *= scale; box.y *= scale; box.z *= scale; box.w *= scale;
anchor.x *= scale; anchor.y *= scale; anchor.z *= scale; anchor.w *= scale;
float x = ((box.x - anchor.x) / anchor.z - means[0]) / stds[0];
float y = ((box.y - anchor.y) / anchor.w - means[1]) / stds[1];
float z = (log(box.z / anchor.z) - means[2]) / stds[2];
float w = (log(box.w / anchor.w) - means[3]) / stds[3];
return {x, y, z, w};
}
__device__ void WriteMatchesToOutput(
unsigned int anchors_count, float criteria, int *labels_out, const int *labels_in,
float4 *boxes_out, const float4 *boxes_in,
volatile int *best_box_idx, volatile float *best_box_iou, bool offset,
const float* means, const float* stds, float scale, const float4 *anchors_as_cwh) {
for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) {
if (best_box_iou[anchor] > criteria) {
int box_idx = best_box_idx[anchor];
labels_out[anchor] = labels_in[box_idx];
float4 box = boxes_in[box_idx];
if (!offset)
boxes_out[anchor] = ToCenterWidthHeight(box);
else
boxes_out[anchor] = MatchOffsets(
ToCenterWidthHeight(box), anchors_as_cwh[anchor], means, stds, scale);
}
}
}
__device__ void MatchBoxWithAnchors(
const float4 &box, const int box_idx, unsigned int anchors_count, const float4 *anchors,
volatile int *best_anchor_idx_tmp, volatile float *best_anchor_iou_tmp,
volatile int *best_box_idx, volatile float *best_box_iou) {
float best_anchor_iou = -1.0f;
int best_anchor_idx = -1;
for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) {
float new_val = CalculateIou(box, anchors[anchor]);
if (new_val >= best_anchor_iou) {
best_anchor_iou = new_val;
best_anchor_idx = anchor;
}
if (new_val >= best_box_iou[anchor]) {
best_box_iou[anchor] = new_val;
best_box_idx[anchor] = box_idx;
}
}
best_anchor_iou_tmp[threadIdx.x] = best_anchor_iou;
best_anchor_idx_tmp[threadIdx.x] = best_anchor_idx;
}
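// One thread block encodes one sample: every ground-truth box is matched
// against all anchors, the best anchor for each box is then forced to keep
// that box (its IoU is set to 2.f), and finally anchors whose best IoU exceeds
// `criteria` write out the matched label and (optionally offset-encoded) box.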
template <int BLOCK_SIZE>
__global__ void Encode(
const float4 *boxes_in, const int *labels_in, const int *offsets, const int anchors_count,
const float4 *anchors, const float criteria, float4 *boxes_out, int *labels_out,
int *box_idx_buffer, float *box_iou_buffer, bool offset, const float* means,
const float* stds, float scale, const float4 *anchors_as_cwh) {
const int sample = blockIdx.x;
// Remark: This algorithm is very sensitive to floating point arithmetic effects.
// For now, the excessive use of volatile in this code makes it conform to the
// reference solution in terms of the resulting encoding.
__shared__ volatile int best_anchor_idx_tmp[BLOCK_SIZE];
__shared__ volatile float best_anchor_iou_tmp[BLOCK_SIZE];
volatile int *best_box_idx = box_idx_buffer + sample * anchors_count;
volatile float *best_box_iou = box_iou_buffer + sample * anchors_count;
int box_idx = 0;
for (int box_global_idx = offsets[sample]; box_global_idx < offsets[sample+1]; ++box_global_idx) {
MatchBoxWithAnchors(
boxes_in[box_global_idx],
box_idx,
anchors_count,
anchors,
best_anchor_idx_tmp,
best_anchor_iou_tmp,
best_box_idx,
best_box_iou);
__syncthreads();
FindBestMatch(blockDim.x, best_anchor_iou_tmp, best_anchor_idx_tmp);
__syncthreads();
if (threadIdx.x == 0) {
int idx = best_anchor_idx_tmp[0];
best_box_idx[idx] = box_idx;
best_box_iou[idx] = 2.f;
}
__syncthreads();
box_idx++;
}
__syncthreads();
WriteMatchesToOutput(
anchors_count,
criteria,
labels_out + sample * anchors_count,
labels_in + offsets[sample],
boxes_out + sample * anchors_count,
boxes_in + offsets[sample],
best_box_idx,
best_box_iou,
offset,
means,
stds,
scale,
anchors_as_cwh);
}
std::pair<int *, float *> BoxEncoder<GPUBackend>::ClearBuffers(const hipStream_t &stream) {
auto best_box_idx_data = best_box_idx_.mutable_data<int>();
auto best_box_iou_data = best_box_iou_.mutable_data<float>();
CUDA_CALL(hipMemsetAsync(
best_box_idx_data, 0, curr_batch_size_ * anchors_count_ * sizeof(int), stream));
CUDA_CALL(hipMemsetAsync(
best_box_iou_data, 0, curr_batch_size_ * anchors_count_ * sizeof(float), stream));
return {best_box_idx_data, best_box_iou_data};
}
void BoxEncoder<GPUBackend>::WriteAnchorsToOutput(
float4 *boxes_out_data, int *labels_out_data, const hipStream_t &stream) {
CUDA_CALL(hipMemsetAsync(
labels_out_data,
0,
curr_batch_size_ * anchors_count_ * sizeof(int), stream));
for (int sample = 0; sample < curr_batch_size_; ++sample)
MemCopy(
boxes_out_data + sample * anchors_count_,
anchors_as_center_wh_.data<float>(),
anchors_count_ * BoundingBox::size * sizeof(float),
stream);
}
void BoxEncoder<GPUBackend>::ClearOutput(
float4 *boxes_out_data, int *labels_out_data, const hipStream_t &stream) {
CUDA_CALL(hipMemsetAsync(
labels_out_data,
0,
curr_batch_size_ * anchors_count_ * sizeof(int),
stream));
for (int sample = 0; sample < curr_batch_size_; ++sample)
CUDA_CALL(hipMemsetAsync(
boxes_out_data + sample * anchors_count_,
0,
anchors_count_ * BoundingBox::size * sizeof(float),
stream));
}
std::pair<TensorListShape<>, TensorListShape<>>
BoxEncoder<GPUBackend>::CalculateDims(
const TensorList<GPUBackend> &boxes_input) {
TensorListShape<> boxes_output_shape(boxes_input.ntensor(), kBoxesOutputDim);
TensorListShape<> labels_output_shape(boxes_input.ntensor(), kLabelsOutputDim);
for (size_t i = 0; i < boxes_input.ntensor(); i++) {
boxes_output_shape.set_tensor_shape(i,
{anchors_count_, static_cast<int64_t>(BoundingBox::size)});
labels_output_shape.set_tensor_shape(i, {anchors_count_});
}
return {boxes_output_shape, labels_output_shape};
}
int *BoxEncoder<GPUBackend>::CalculateBoxesOffsets(
const TensorList<GPUBackend> &boxes_input, const hipStream_t &stream) {
vector<int> offsets {0};
for (int i = 0; i < boxes_input.shape().size(); i++)
offsets.push_back(boxes_input.shape().tensor_shape_span(i)[0] + offsets.back());
auto offsets_data = boxes_offsets_.mutable_data<int>();
MemCopy(offsets_data, offsets.data(), (curr_batch_size_ + 1) * sizeof(int), stream);
return offsets_data;
}
void BoxEncoder<GPUBackend>::RunImpl(Workspace<GPUBackend> &ws) {
const auto &boxes_input = ws.Input<GPUBackend>(kBoxesInId);
const auto &labels_input = ws.Input<GPUBackend>(kLabelsInId);
assert(ws.GetInputBatchSize(kBoxesInId) == ws.GetInputBatchSize(kLabelsInId));
auto curr_batch_size = ws.GetInputBatchSize(kBoxesInId);
const auto anchors_data = reinterpret_cast<const float4 *>(anchors_.data<float>());
const auto anchors_as_cwh_data =
reinterpret_cast<const float4 *>(anchors_as_center_wh_.data<float>());
const auto boxes_data = reinterpret_cast<const float4 *>(boxes_input.data<float>());
const auto labels_data = labels_input.data<int>();
const auto buffers = ClearBuffers(ws.stream());
auto boxes_offsets_data = CalculateBoxesOffsets(boxes_input, ws.stream());
auto dims = CalculateDims(boxes_input);
auto &boxes_output = ws.Output<GPUBackend>(kBoxesOutId);
boxes_output.set_type(boxes_input.type());
boxes_output.Resize(dims.first);
auto boxes_out_data = reinterpret_cast<float4 *>(boxes_output.mutable_data<float>());
auto &labels_output = ws.Output<GPUBackend>(kLabelsOutId);
labels_output.set_type(labels_input.type());
labels_output.Resize(dims.second);
auto labels_out_data = labels_output.mutable_data<int>();
const auto means_data = means_.data<float>();
const auto stds_data = stds_.data<float>();
if (!offset_)
WriteAnchorsToOutput(boxes_out_data, labels_out_data, ws.stream());
else
ClearOutput(boxes_out_data, labels_out_data, ws.stream());
hipLaunchKernelGGL(( Encode<BlockSize>), dim3(curr_batch_size), dim3(BlockSize), 0, ws.stream(),
boxes_data,
labels_data,
boxes_offsets_data,
anchors_count_,
anchors_data,
criteria_,
boxes_out_data,
labels_out_data,
buffers.first,
buffers.second,
offset_,
means_data,
stds_data,
scale_,
anchors_as_cwh_data);
}
DALI_REGISTER_OPERATOR(BoxEncoder, BoxEncoder<GPUBackend>, GPU);
} // namespace dali
|
e3fbe4bec6b71396a6d2a55a7f5002c183895890.cu
|
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/ssd/box_encoder.cuh"
#include <cuda.h>
#include <vector>
#include <utility>
namespace dali {
__host__ __device__ inline float4 ToCenterWidthHeight(const float4 &box) {
return {
0.5f * (box.x + box.z),
0.5f * (box.y + box.w),
box.z - box.x,
box.w - box.y};
}
void BoxEncoder<GPUBackend>::PrepareAnchors(const vector<float> &anchors) {
DALI_ENFORCE(
(anchors.size() % BoundingBox::size) == 0,
"Anchors size must be divisible by 4, actual value = " + std::to_string(anchors.size()));
anchors_count_ = anchors.size() / BoundingBox::size;
anchors_.Resize({anchors_count_, static_cast<int64_t>(BoundingBox::size)});
anchors_as_center_wh_.Resize({anchors_count_, static_cast<int64_t>(BoundingBox::size)});
auto anchors_data_cpu = reinterpret_cast<const float4 *>(anchors.data());
vector<float4> anchors_as_center_wh(anchors_count_);
for (unsigned int anchor = 0; anchor < anchors_count_; ++anchor)
anchors_as_center_wh[anchor] = ToCenterWidthHeight(anchors_data_cpu[anchor]);
auto anchors_data = anchors_.mutable_data<float>();
auto anchors_as_center_wh_data = anchors_as_center_wh_.mutable_data<float>();
MemCopy(anchors_data, anchors.data(), anchors_count_ * BoundingBox::size * sizeof(float));
MemCopy(
anchors_as_center_wh_data,
anchors_as_center_wh.data(),
anchors_count_ * BoundingBox::size * sizeof(float));
}
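// Intersection-over-union of two corner-format boxes (x1, y1, x2, y2); the
// volatile intermediates keep the compiler from reordering or contracting
// these operations (see the remark in Encode).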
__device__ __forceinline__ float CalculateIou(const float4 &b1, const float4 &b2) {
float l = cuda_max(b1.x, b2.x);
float t = cuda_max(b1.y, b2.y);
float r = cuda_min(b1.z, b2.z);
float b = cuda_min(b1.w, b2.w);
float first = cuda_max(r - l, 0.0f);
float second = cuda_max(b - t, 0.0f);
volatile float intersection = first * second;
volatile float area1 = (b1.w - b1.y) * (b1.z - b1.x);
volatile float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
__device__ inline void FindBestMatch(const int N, volatile float *vals, volatile int *idx) {
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
if (vals[threadIdx.x] <= vals[threadIdx.x + stride]) {
if (vals[threadIdx.x] == vals[threadIdx.x + stride]) {
idx[threadIdx.x] = cuda_max(idx[threadIdx.x], idx[threadIdx.x + stride]);
} else {
vals[threadIdx.x] = vals[threadIdx.x + stride];
idx[threadIdx.x] = idx[threadIdx.x + stride];
}
}
}
__syncthreads();
}
}
__device__ float4 MatchOffsets(
float4 box, float4 anchor, const float *means, const float *stds, float scale) {
box.x *= scale; box.y *= scale; box.z *= scale; box.w *= scale;
anchor.x *= scale; anchor.y *= scale; anchor.z *= scale; anchor.w *= scale;
float x = ((box.x - anchor.x) / anchor.z - means[0]) / stds[0];
float y = ((box.y - anchor.y) / anchor.w - means[1]) / stds[1];
float z = (log(box.z / anchor.z) - means[2]) / stds[2];
float w = (log(box.w / anchor.w) - means[3]) / stds[3];
return {x, y, z, w};
}
__device__ void WriteMatchesToOutput(
unsigned int anchors_count, float criteria, int *labels_out, const int *labels_in,
float4 *boxes_out, const float4 *boxes_in,
volatile int *best_box_idx, volatile float *best_box_iou, bool offset,
const float* means, const float* stds, float scale, const float4 *anchors_as_cwh) {
for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) {
if (best_box_iou[anchor] > criteria) {
int box_idx = best_box_idx[anchor];
labels_out[anchor] = labels_in[box_idx];
float4 box = boxes_in[box_idx];
if (!offset)
boxes_out[anchor] = ToCenterWidthHeight(box);
else
boxes_out[anchor] = MatchOffsets(
ToCenterWidthHeight(box), anchors_as_cwh[anchor], means, stds, scale);
}
}
}
__device__ void MatchBoxWithAnchors(
const float4 &box, const int box_idx, unsigned int anchors_count, const float4 *anchors,
volatile int *best_anchor_idx_tmp, volatile float *best_anchor_iou_tmp,
volatile int *best_box_idx, volatile float *best_box_iou) {
float best_anchor_iou = -1.0f;
int best_anchor_idx = -1;
for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) {
float new_val = CalculateIou(box, anchors[anchor]);
if (new_val >= best_anchor_iou) {
best_anchor_iou = new_val;
best_anchor_idx = anchor;
}
if (new_val >= best_box_iou[anchor]) {
best_box_iou[anchor] = new_val;
best_box_idx[anchor] = box_idx;
}
}
best_anchor_iou_tmp[threadIdx.x] = best_anchor_iou;
best_anchor_idx_tmp[threadIdx.x] = best_anchor_idx;
}
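// One thread block encodes one sample: every ground-truth box is matched
// against all anchors, the best anchor for each box is then forced to keep
// that box (its IoU is set to 2.f), and finally anchors whose best IoU exceeds
// `criteria` write out the matched label and (optionally offset-encoded) box.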
template <int BLOCK_SIZE>
__global__ void Encode(
const float4 *boxes_in, const int *labels_in, const int *offsets, const int anchors_count,
const float4 *anchors, const float criteria, float4 *boxes_out, int *labels_out,
int *box_idx_buffer, float *box_iou_buffer, bool offset, const float* means,
const float* stds, float scale, const float4 *anchors_as_cwh) {
const int sample = blockIdx.x;
// Remark: This algorithm is very sensitive to floating point arithmetic effects.
// For now, the excessive use of volatile in this code makes it conform to the
// reference solution in terms of the resulting encoding.
__shared__ volatile int best_anchor_idx_tmp[BLOCK_SIZE];
__shared__ volatile float best_anchor_iou_tmp[BLOCK_SIZE];
volatile int *best_box_idx = box_idx_buffer + sample * anchors_count;
volatile float *best_box_iou = box_iou_buffer + sample * anchors_count;
int box_idx = 0;
for (int box_global_idx = offsets[sample]; box_global_idx < offsets[sample+1]; ++box_global_idx) {
MatchBoxWithAnchors(
boxes_in[box_global_idx],
box_idx,
anchors_count,
anchors,
best_anchor_idx_tmp,
best_anchor_iou_tmp,
best_box_idx,
best_box_iou);
__syncthreads();
FindBestMatch(blockDim.x, best_anchor_iou_tmp, best_anchor_idx_tmp);
__syncthreads();
if (threadIdx.x == 0) {
int idx = best_anchor_idx_tmp[0];
best_box_idx[idx] = box_idx;
best_box_iou[idx] = 2.f;
}
__syncthreads();
box_idx++;
}
__syncthreads();
WriteMatchesToOutput(
anchors_count,
criteria,
labels_out + sample * anchors_count,
labels_in + offsets[sample],
boxes_out + sample * anchors_count,
boxes_in + offsets[sample],
best_box_idx,
best_box_iou,
offset,
means,
stds,
scale,
anchors_as_cwh);
}
std::pair<int *, float *> BoxEncoder<GPUBackend>::ClearBuffers(const cudaStream_t &stream) {
auto best_box_idx_data = best_box_idx_.mutable_data<int>();
auto best_box_iou_data = best_box_iou_.mutable_data<float>();
CUDA_CALL(cudaMemsetAsync(
best_box_idx_data, 0, curr_batch_size_ * anchors_count_ * sizeof(int), stream));
CUDA_CALL(cudaMemsetAsync(
best_box_iou_data, 0, curr_batch_size_ * anchors_count_ * sizeof(float), stream));
return {best_box_idx_data, best_box_iou_data};
}
void BoxEncoder<GPUBackend>::WriteAnchorsToOutput(
float4 *boxes_out_data, int *labels_out_data, const cudaStream_t &stream) {
CUDA_CALL(cudaMemsetAsync(
labels_out_data,
0,
curr_batch_size_ * anchors_count_ * sizeof(int), stream));
for (int sample = 0; sample < curr_batch_size_; ++sample)
MemCopy(
boxes_out_data + sample * anchors_count_,
anchors_as_center_wh_.data<float>(),
anchors_count_ * BoundingBox::size * sizeof(float),
stream);
}
void BoxEncoder<GPUBackend>::ClearOutput(
float4 *boxes_out_data, int *labels_out_data, const cudaStream_t &stream) {
CUDA_CALL(cudaMemsetAsync(
labels_out_data,
0,
curr_batch_size_ * anchors_count_ * sizeof(int),
stream));
for (int sample = 0; sample < curr_batch_size_; ++sample)
CUDA_CALL(cudaMemsetAsync(
boxes_out_data + sample * anchors_count_,
0,
anchors_count_ * BoundingBox::size * sizeof(float),
stream));
}
std::pair<TensorListShape<>, TensorListShape<>>
BoxEncoder<GPUBackend>::CalculateDims(
const TensorList<GPUBackend> &boxes_input) {
TensorListShape<> boxes_output_shape(boxes_input.ntensor(), kBoxesOutputDim);
TensorListShape<> labels_output_shape(boxes_input.ntensor(), kLabelsOutputDim);
for (size_t i = 0; i < boxes_input.ntensor(); i++) {
boxes_output_shape.set_tensor_shape(i,
{anchors_count_, static_cast<int64_t>(BoundingBox::size)});
labels_output_shape.set_tensor_shape(i, {anchors_count_});
}
return {boxes_output_shape, labels_output_shape};
}
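// Builds an exclusive prefix sum of per-sample box counts and copies it to the device
// so the kernel can locate each sample's boxes.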
int *BoxEncoder<GPUBackend>::CalculateBoxesOffsets(
const TensorList<GPUBackend> &boxes_input, const cudaStream_t &stream) {
vector<int> offsets {0};
for (int i = 0; i < boxes_input.shape().size(); i++)
offsets.push_back(boxes_input.shape().tensor_shape_span(i)[0] + offsets.back());
auto offsets_data = boxes_offsets_.mutable_data<int>();
MemCopy(offsets_data, offsets.data(), (curr_batch_size_ + 1) * sizeof(int), stream);
return offsets_data;
}
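// Orchestrates the encoding: clears the scratch match buffers, computes per-sample box
// offsets, resizes the outputs, pre-fills them with the anchors (or zeros when emitting
// offsets), and launches one Encode block per sample.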
void BoxEncoder<GPUBackend>::RunImpl(Workspace<GPUBackend> &ws) {
const auto &boxes_input = ws.Input<GPUBackend>(kBoxesInId);
const auto &labels_input = ws.Input<GPUBackend>(kLabelsInId);
assert(ws.GetInputBatchSize(kBoxesInId) == ws.GetInputBatchSize(kLabelsInId));
auto curr_batch_size = ws.GetInputBatchSize(kBoxesInId);
const auto anchors_data = reinterpret_cast<const float4 *>(anchors_.data<float>());
const auto anchors_as_cwh_data =
reinterpret_cast<const float4 *>(anchors_as_center_wh_.data<float>());
const auto boxes_data = reinterpret_cast<const float4 *>(boxes_input.data<float>());
const auto labels_data = labels_input.data<int>();
const auto buffers = ClearBuffers(ws.stream());
auto boxes_offsets_data = CalculateBoxesOffsets(boxes_input, ws.stream());
auto dims = CalculateDims(boxes_input);
auto &boxes_output = ws.Output<GPUBackend>(kBoxesOutId);
boxes_output.set_type(boxes_input.type());
boxes_output.Resize(dims.first);
auto boxes_out_data = reinterpret_cast<float4 *>(boxes_output.mutable_data<float>());
auto &labels_output = ws.Output<GPUBackend>(kLabelsOutId);
labels_output.set_type(labels_input.type());
labels_output.Resize(dims.second);
auto labels_out_data = labels_output.mutable_data<int>();
const auto means_data = means_.data<float>();
const auto stds_data = stds_.data<float>();
if (!offset_)
WriteAnchorsToOutput(boxes_out_data, labels_out_data, ws.stream());
else
ClearOutput(boxes_out_data, labels_out_data, ws.stream());
Encode<BlockSize><<<curr_batch_size, BlockSize, 0, ws.stream()>>>(
boxes_data,
labels_data,
boxes_offsets_data,
anchors_count_,
anchors_data,
criteria_,
boxes_out_data,
labels_out_data,
buffers.first,
buffers.second,
offset_,
means_data,
stds_data,
scale_,
anchors_as_cwh_data);
}
DALI_REGISTER_OPERATOR(BoxEncoder, BoxEncoder<GPUBackend>, GPU);
} // namespace dali
|
c2baa1b6d0a3249aaaa7689f8ffd6e2b6faa50ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=512 --blockDim=384
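// Note: __requires and __global_invariant below are verification annotations
// (GPUVerify-style preconditions and loop invariants), not standard CUDA/HIP code.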
typedef unsigned char Pixel;
texture<unsigned char, 2> tex;
#define min(x,y) (x < y ? x : y)
#define max(x,y) (x < y ? y : x)
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale)
{
__requires(Pitch == 512);
__requires(w == 512);
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x;
__global_invariant(i % blockDim.x == threadIdx.x),
__global_invariant(__write_implies(pSobelOriginal, __write_offset_bytes(pSobelOriginal)%Pitch%blockDim.x == threadIdx.x)),
i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
|
c2baa1b6d0a3249aaaa7689f8ffd6e2b6faa50ab.cu
|
//pass
//--gridDim=512 --blockDim=384
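// Note: __requires and __global_invariant below are verification annotations
// (GPUVerify-style preconditions and loop invariants), not standard CUDA code.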
typedef unsigned char Pixel;
texture<unsigned char, 2> tex;
#define min(x,y) (x < y ? x : y)
#define max(x,y) (x < y ? y : x)
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale)
{
__requires(Pitch == 512);
__requires(w == 512);
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x;
__global_invariant(i % blockDim.x == threadIdx.x),
__global_invariant(__write_implies(pSobelOriginal, __write_offset_bytes(pSobelOriginal)%Pitch%blockDim.x == threadIdx.x)),
i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
|
70d9eeb35cd247a55971ec9d69afd1014c8e03fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Counting(int* HalfData, int HalfDataSize, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<HalfDataSize)
{
HalfData[i] *= N;
}
}
|
70d9eeb35cd247a55971ec9d69afd1014c8e03fc.cu
|
#include "includes.h"
__global__ void Counting(int* HalfData, int HalfDataSize, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<HalfDataSize)
{
HalfData[i] *= N;
}
}
|
9d296ae4b252090ef5d619afe7a7ea4931c9e167.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test_comm.h"
void host_norm(double* pIn, double *pOut, int sizeIn, int sizeOut)
{
int i, j;
double *data1, *data2;
float *data1f, *data2f;
float *data1f_gpu, *data2f_gpu;
int sizeBlock;
// variable for time measure
int it;
float t_avg;
t_avg = 0;
//ITERATE defined in project_comm.h
it = ITERATE;
sizeBlock = VECTOR_BLOCK_SIZE;
// get Input data pointer
data1 = pIn;
data2 = pOut;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
hipMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn);
hipMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut);
// Retrieve the input data
// Check if the input array is single or double precision
 // The input array is in double precision; it needs to be converted to floats before being sent to the card
data1f = (float *) malloc(sizeof(float)*sizeIn);
for (j = 0; j < sizeIn; j++)
{
data1f[j] = (float) data1[j];
}
data2f = (float *) malloc(sizeof(float)*sizeOut);
clock_t startTime;
clock_t endTime;
startTime=clock();
hipMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, hipMemcpyHostToDevice);
 // Compute execution configuration using VECTOR_BLOCK_SIZE threads per block
dim3 dimBlock(sizeBlock);
dim3 dimGrid((sizeIn)/dimBlock.x);
if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1;
for (i = 0; i < it ; i++){
// Call function on GPU
hipLaunchKernelGGL(( norm_elements), dim3(dimGrid),dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, sizeIn);
hipError_t e;
e = hipGetLastError();
if ( e != hipSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", hipGetErrorString(e));
exit(-1);
}
}//for it
endTime=clock();
t_avg += endTime-startTime;
printf("laufTime in GPU = %lf (ms)\n", ((double) t_avg)*1000 /(it* CLOCKS_PER_SEC));
// Copy result back to host
hipMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, hipMemcpyDeviceToHost);
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeOut; j++)
{
data2[j] = (double) data2f[j];
}
// Clean-up memory on device and host
free(data1f);
free(data2f);
hipFree(data1f_gpu);
hipFree(data2f_gpu);
}
int test_norm()
{
double *pIn, *pOut;
int sizeIn, sizeOut;
int i;
sizeIn = 1000;
sizeOut = sizeIn/VECTOR_BLOCK_SIZE;
pIn = (double*)malloc(sizeof(double)*sizeIn);
pOut = (double*)malloc(sizeof(double)*sizeOut);
/*
pIn[0] = 3;
pIn[1] = 4;
//pIn[2] = 3;
*/
for (i = 0; i < sizeIn; i++){
pIn[i] = 1;
}
host_norm(pIn, pOut, sizeIn, sizeOut);
printf("output square result");
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
printf("output norm result");
for (i = 0; i < sizeOut; i++)
{
//pOut[i] = sqrt(pOut[i]);
printf("squre of pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
free(pIn);
free(pOut);
return 0;
}
int mexTest_norm(double *pIn,double *pOut,int sizeIn)
{
//double *pOut;
int sizeOut;
int i;
//sizeOut =sizeIn/VECTOR_BLOCK_SIZE + 1;
sizeOut=1;
//pOut = (double*)malloc(sizeof(double)*sizeOut);
host_norm(pIn, pOut, sizeIn, sizeOut);
double expect=sizeIn;
//printf("output square result");
//if(pOut[0] != expect){
//for (i = 0; i < sizeOut; i++)
//{
//printf(" pOut[%d] = %lf, ", i, pOut[i]);
//}
//}
//free(pOut);
return 0;
}
|
9d296ae4b252090ef5d619afe7a7ea4931c9e167.cu
|
#include "test_comm.h"
void host_norm(double* pIn, double *pOut, int sizeIn, int sizeOut)
{
int i, j;
double *data1, *data2;
float *data1f, *data2f;
float *data1f_gpu, *data2f_gpu;
int sizeBlock;
// variable for time measure
int it;
float t_avg;
t_avg = 0;
//ITERATE defined in project_comm.h
it = ITERATE;
sizeBlock = VECTOR_BLOCK_SIZE;
// get Input data pointer
data1 = pIn;
data2 = pOut;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
cudaMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn);
cudaMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut);
// Retrieve the input data
// Check if the input array is single or double precision
 // The input array is in double precision; it needs to be converted to floats before being sent to the card
data1f = (float *) malloc(sizeof(float)*sizeIn);
for (j = 0; j < sizeIn; j++)
{
data1f[j] = (float) data1[j];
}
data2f = (float *) malloc(sizeof(float)*sizeOut);
clock_t startTime;
clock_t endTime;
startTime=clock();
cudaMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, cudaMemcpyHostToDevice);
 // Compute execution configuration using VECTOR_BLOCK_SIZE threads per block
dim3 dimBlock(sizeBlock);
dim3 dimGrid((sizeIn)/dimBlock.x);
if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1;
for (i = 0; i < it ; i++){
// Call function on GPU
norm_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, sizeIn);
cudaError_t e;
e = cudaGetLastError();
if ( e != cudaSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", cudaGetErrorString(e));
exit(-1);
}
}//for it
endTime=clock();
t_avg += endTime-startTime;
printf("laufTime in GPU = %lf (ms)\n", ((double) t_avg)*1000 /(it* CLOCKS_PER_SEC));
// Copy result back to host
cudaMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, cudaMemcpyDeviceToHost);
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeOut; j++)
{
data2[j] = (double) data2f[j];
}
// Clean-up memory on device and host
free(data1f);
free(data2f);
cudaFree(data1f_gpu);
cudaFree(data2f_gpu);
}
int test_norm()
{
double *pIn, *pOut;
int sizeIn, sizeOut;
int i;
sizeIn = 1000;
sizeOut = sizeIn/VECTOR_BLOCK_SIZE;
pIn = (double*)malloc(sizeof(double)*sizeIn);
pOut = (double*)malloc(sizeof(double)*sizeOut);
/*
pIn[0] = 3;
pIn[1] = 4;
//pIn[2] = 3;
*/
for (i = 0; i < sizeIn; i++){
pIn[i] = 1;
}
host_norm(pIn, pOut, sizeIn, sizeOut);
printf("output square result");
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
printf("output norm result");
for (i = 0; i < sizeOut; i++)
{
//pOut[i] = sqrt(pOut[i]);
printf("squre of pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
free(pIn);
free(pOut);
return 0;
}
int mexTest_norm(double *pIn,double *pOut,int sizeIn)
{
//double *pOut;
int sizeOut;
int i;
//sizeOut =sizeIn/VECTOR_BLOCK_SIZE + 1;
sizeOut=1;
//pOut = (double*)malloc(sizeof(double)*sizeOut);
host_norm(pIn, pOut, sizeIn, sizeOut);
double expect=sizeIn;
//printf("output square result");
//if(pOut[0] != expect){
//for (i = 0; i < sizeOut; i++)
//{
//printf(" pOut[%d] = %lf, ", i, pOut[i]);
//}
//}
//free(pOut);
return 0;
}
|
070228889be1e880cc38b8078b100bf480eba5f7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include "Utilities.cuh"
/********/
/* MAIN */
/********/
int main()
{
// --- Initialize cuSPARSE
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
const int Nrows = 4; // --- Number of rows
const int Ncols = 4; // --- Number of columns
const int N = Nrows;
// --- Host side dense matrix
double *h_A_dense = (double*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
// --- Column-major ordering
h_A_dense[0] = 1.0f; h_A_dense[4] = 4.0f; h_A_dense[8] = 0.0f; h_A_dense[12] = 0.0f;
h_A_dense[1] = 0.0f; h_A_dense[5] = 2.0f; h_A_dense[9] = 3.0f; h_A_dense[13] = 0.0f;
h_A_dense[2] = 5.0f; h_A_dense[6] = 0.0f; h_A_dense[10] = 0.0f; h_A_dense[14] = 7.0f;
h_A_dense[3] = 0.0f; h_A_dense[7] = 0.0f; h_A_dense[11] = 9.0f; h_A_dense[15] = 0.0f;
//create device array and copy host to it
double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, Nrows * Ncols * sizeof(*d_A_dense)));
gpuErrchk(hipMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), hipMemcpyHostToDevice));
// --- Descriptor for sparse matrix A
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType (descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase (descrA, HIPSPARSE_INDEX_BASE_ZERO);
int nnz = 0; // --- Number of nonzero elements in dense matrix
const int lda = Nrows; // --- Leading dimension of dense matrix
// --- Device side number of nonzero elements per row
int *d_nnzPerVector; gpuErrchk(hipMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
// --- Host side number of nonzero elements per row
int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
gpuErrchk(hipMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), hipMemcpyDeviceToHost));
printf("Number of nonzero elements in dense matrix = %i\n\n", nnz);
for (int i = 0; i < Nrows; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]);
printf("\n");
// --- Device side dense matrix
double *d_A; gpuErrchk(hipMalloc(&d_A, nnz * sizeof(*d_A)));
int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
cusparseSafeCall(hipsparseDdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
// --- Host side dense matrix
double *h_A = (double *)malloc(nnz * sizeof(*h_A));
int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
gpuErrchk(hipMemcpy(h_A, d_A, nnz*sizeof(*h_A), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost));
for (int i = 0; i < nnz; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n");
for (int i = 0; i < (Nrows + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n");
for (int i = 0; i < nnz; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]);
// --- Allocating and defining dense host and device data vectors
double *h_y = (double *)malloc(Nrows * sizeof(double));
h_y[0] = 100.0; h_y[1] = 200.0; h_y[2] = 400.0; h_y[3] = 500.0;
double *d_y; gpuErrchk(hipMalloc(&d_y, Nrows * sizeof(double)));
gpuErrchk(hipMemcpy(d_y, h_y, Nrows * sizeof(double), hipMemcpyHostToDevice));
// --- Allocating the host and device side result vector
double *h_x = (double *)malloc(Ncols * sizeof(double));
double *d_x; gpuErrchk(hipMalloc(&d_x, Ncols * sizeof(double)));
// --- CUDA solver initialization
cusolverSpHandle_t solver_handle;
cusolverSpCreate(&solver_handle);
//int singularity;
// --- Using LU factorization
//cusolveSafeCall(cusolverSpDcsrlsvluHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, 0, h_x, &singularity));
// --- Using QR factorization
//cusolveSafeCall(cusolverSpDcsrlsvqrHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, 0, h_x, &singularity));
int rankA;
int *p = (int *)malloc(N * sizeof(int));
double min_norm;
cusolveSafeCall(cusolverSpDcsrlsqvqrHost(solver_handle, N, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, &rankA, h_x, p, &min_norm));
printf("Showing the results...\n");
for (int i = 0; i < N; i++) printf("%f\n", h_x[i]);
}
|
070228889be1e880cc38b8078b100bf480eba5f7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include "Utilities.cuh"
/********/
/* MAIN */
/********/
int main()
{
// --- Initialize cuSPARSE
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
const int Nrows = 4; // --- Number of rows
const int Ncols = 4; // --- Number of columns
const int N = Nrows;
// --- Host side dense matrix
double *h_A_dense = (double*)malloc(Nrows*Ncols*sizeof(*h_A_dense));
// --- Column-major ordering
h_A_dense[0] = 1.0f; h_A_dense[4] = 4.0f; h_A_dense[8] = 0.0f; h_A_dense[12] = 0.0f;
h_A_dense[1] = 0.0f; h_A_dense[5] = 2.0f; h_A_dense[9] = 3.0f; h_A_dense[13] = 0.0f;
h_A_dense[2] = 5.0f; h_A_dense[6] = 0.0f; h_A_dense[10] = 0.0f; h_A_dense[14] = 7.0f;
h_A_dense[3] = 0.0f; h_A_dense[7] = 0.0f; h_A_dense[11] = 9.0f; h_A_dense[15] = 0.0f;
//create device array and copy host to it
double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, Nrows * Ncols * sizeof(*d_A_dense)));
gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, Nrows * Ncols * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
// --- Descriptor for sparse matrix A
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType (descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase (descrA, CUSPARSE_INDEX_BASE_ZERO);
int nnz = 0; // --- Number of nonzero elements in dense matrix
const int lda = Nrows; // --- Leading dimension of dense matrix
// --- Device side number of nonzero elements per row
int *d_nnzPerVector; gpuErrchk(cudaMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)));
cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, &nnz));
// --- Host side number of nonzero elements per row
int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector));
gpuErrchk(cudaMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), cudaMemcpyDeviceToHost));
printf("Number of nonzero elements in dense matrix = %i\n\n", nnz);
for (int i = 0; i < Nrows; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]);
printf("\n");
// --- Device side dense matrix
double *d_A; gpuErrchk(cudaMalloc(&d_A, nnz * sizeof(*d_A)));
int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)));
int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)));
cusparseSafeCall(cusparseDdense2csr(handle, Nrows, Ncols, descrA, d_A_dense, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices));
// --- Host side dense matrix
double *h_A = (double *)malloc(nnz * sizeof(*h_A));
int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices));
int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices));
gpuErrchk(cudaMemcpy(h_A, d_A, nnz*sizeof(*h_A), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost));
for (int i = 0; i < nnz; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n");
for (int i = 0; i < (Nrows + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n");
for (int i = 0; i < nnz; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]);
// --- Allocating and defining dense host and device data vectors
double *h_y = (double *)malloc(Nrows * sizeof(double));
h_y[0] = 100.0; h_y[1] = 200.0; h_y[2] = 400.0; h_y[3] = 500.0;
double *d_y; gpuErrchk(cudaMalloc(&d_y, Nrows * sizeof(double)));
gpuErrchk(cudaMemcpy(d_y, h_y, Nrows * sizeof(double), cudaMemcpyHostToDevice));
// --- Allocating the host and device side result vector
double *h_x = (double *)malloc(Ncols * sizeof(double));
double *d_x; gpuErrchk(cudaMalloc(&d_x, Ncols * sizeof(double)));
// --- CUDA solver initialization
cusolverSpHandle_t solver_handle;
cusolverSpCreate(&solver_handle);
//int singularity;
// --- Using LU factorization
//cusolveSafeCall(cusolverSpDcsrlsvluHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, 0, h_x, &singularity));
// --- Using QR factorization
//cusolveSafeCall(cusolverSpDcsrlsvqrHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, 0, h_x, &singularity));
int rankA;
int *p = (int *)malloc(N * sizeof(int));
double min_norm;
cusolveSafeCall(cusolverSpDcsrlsqvqrHost(solver_handle, N, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_y, 0.000001, &rankA, h_x, p, &min_norm));
printf("Showing the results...\n");
for (int i = 0; i < N; i++) printf("%f\n", h_x[i]);
}
|
5b319c8fbff140c1846f9d4469a3f34aa4e7160f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <future>
#include <mutex>
#include <stdio.h>
// This works fine with a mutex, but crashes with a sigbus error when not using a mutex
// #define USE_MUTEX
#ifdef USE_MUTEX
std::mutex m;
#endif
__global__ void testKernel() {
printf("Thread Kernel running\n");
}
void testCuda() {
hipLaunchKernelGGL(( testKernel), dim3(1),dim3(1), 0, 0, );
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess) {
printf("SYNC FAILED\n\n\n");
}
}
struct MyThread {
void run() {
int threadLoop = 0;
while(1) {
#ifdef USE_MUTEX
m.lock();
#endif
printf("Thread Run (loop %d)\n", threadLoop++);
// run kernel
testCuda();
#ifdef USE_MUTEX
m.unlock();
#endif
usleep(0);
}
}
};
int main(int argc, char** argv) {
MyThread thread;
auto threadFuture = std::async(std::launch::async, &MyThread::run, thread);
int loop = 0;
while(1){
#ifdef USE_MUTEX
m.lock();
#endif
int* temp = nullptr;
printf("*** Main Allocating (loop = %d)\n", loop++);
hipError_t err = hipMallocManaged(&temp, sizeof(int));
if (err != hipSuccess) {
printf("Failed to hipMallocManaged()\n");
return -1;
}
*temp = 0; // <-- SIGBUS occurs here if don't use a mutex
printf("*** Main Finished Allocating value: %d\n", *temp);
#ifdef USE_MUTEX
m.unlock();
#endif
usleep(0);
}
}
|
5b319c8fbff140c1846f9d4469a3f34aa4e7160f.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <future>
#include <mutex>
#include <stdio.h>
// This works fine with a mutex, but crashes with a sigbus error when not using a mutex
// #define USE_MUTEX
#ifdef USE_MUTEX
std::mutex m;
#endif
__global__ void testKernel() {
printf("Thread Kernel running\n");
}
void testCuda() {
testKernel<<<1,1>>>();
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("SYNC FAILED\n\n\n");
}
}
struct MyThread {
void run() {
int threadLoop = 0;
while(1) {
#ifdef USE_MUTEX
m.lock();
#endif
printf("Thread Run (loop %d)\n", threadLoop++);
// run kernel
testCuda();
#ifdef USE_MUTEX
m.unlock();
#endif
usleep(0);
}
}
};
int main(int argc, char** argv) {
MyThread thread;
auto threadFuture = std::async(std::launch::async, &MyThread::run, thread);
int loop = 0;
while(1){
#ifdef USE_MUTEX
m.lock();
#endif
int* temp = nullptr;
printf("*** Main Allocating (loop = %d)\n", loop++);
cudaError_t err = cudaMallocManaged(&temp, sizeof(int));
if (err != cudaSuccess) {
printf("Failed to cudaMallocManaged()\n");
return -1;
}
*temp = 0; // <-- SIGBUS occurs here if don't use a mutex
printf("*** Main Finished Allocating value: %d\n", *temp);
#ifdef USE_MUTEX
m.unlock();
#endif
usleep(0);
}
}
|
4865dfb5524f62e6d52b26c37c667b08c4e59684.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void readGlobalMemoryUnit(float *data, float *output, int size, int repeat)
{
int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
float sum = 0;
int s = gid*512;
for (j=0 ; j<repeat ; ++j)
{
float a0 = data[(s+0)&(size-1)];
float a1 = data[(s+1)&(size-1)];
float a2 = data[(s+2)&(size-1)];
float a3 = data[(s+3)&(size-1)];
float a4 = data[(s+4)&(size-1)];
float a5 = data[(s+5)&(size-1)];
float a6 = data[(s+6)&(size-1)];
float a7 = data[(s+7)&(size-1)];
float a8 = data[(s+8)&(size-1)];
float a9 = data[(s+9)&(size-1)];
float a10 = data[(s+10)&(size-1)];
float a11 = data[(s+11)&(size-1)];
float a12 = data[(s+12)&(size-1)];
float a13 = data[(s+13)&(size-1)];
float a14 = data[(s+14)&(size-1)];
float a15 = data[(s+15)&(size-1)];
sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
s = (s+16)&(size-1);
}
output[gid] = sum;
}
|
4865dfb5524f62e6d52b26c37c667b08c4e59684.cu
|
#include "includes.h"
__global__ void readGlobalMemoryUnit(float *data, float *output, int size, int repeat)
{
int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
float sum = 0;
int s = gid*512;
for (j=0 ; j<repeat ; ++j)
{
float a0 = data[(s+0)&(size-1)];
float a1 = data[(s+1)&(size-1)];
float a2 = data[(s+2)&(size-1)];
float a3 = data[(s+3)&(size-1)];
float a4 = data[(s+4)&(size-1)];
float a5 = data[(s+5)&(size-1)];
float a6 = data[(s+6)&(size-1)];
float a7 = data[(s+7)&(size-1)];
float a8 = data[(s+8)&(size-1)];
float a9 = data[(s+9)&(size-1)];
float a10 = data[(s+10)&(size-1)];
float a11 = data[(s+11)&(size-1)];
float a12 = data[(s+12)&(size-1)];
float a13 = data[(s+13)&(size-1)];
float a14 = data[(s+14)&(size-1)];
float a15 = data[(s+15)&(size-1)];
sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
s = (s+16)&(size-1);
}
output[gid] = sum;
}
|
933845207fe3477d8e526ab77d6c347c737592e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "tile.h"
#include "kernels.h"
#include "fft_helper.h"
#include "common.h"
namespace SCAMP {
SCAMPError_t SCAMP_Tile::do_self_join_full(hipStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
printf("SCAMP error\n");
return error;
}
return SCAMP_NO_ERROR;
}
SCAMPError_t SCAMP_Tile::do_self_join_half(hipStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1,tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
return SCAMP_NO_ERROR;
}
SCAMPError_t SCAMP_Tile::do_ab_join_full(hipStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp_type, full_join, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp_type, full_join, s);
if(error != SCAMP_NO_ERROR) {
printf("SCAMP error\n");
return error;
}
return SCAMP_NO_ERROR;
}
}
|
933845207fe3477d8e526ab77d6c347c737592e8.cu
|
#include "tile.h"
#include "kernels.h"
#include "fft_helper.h"
#include "common.h"
namespace SCAMP {
SCAMPError_t SCAMP_Tile::do_self_join_full(cudaStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
printf("SCAMP error\n");
return error;
}
return SCAMP_NO_ERROR;
}
SCAMPError_t SCAMP_Tile::do_self_join_half(cudaStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_self_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1,tile_start_A, tile_start_B, props, fp_type, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
return SCAMP_NO_ERROR;
}
SCAMPError_t SCAMP_Tile::do_ab_join_full(cudaStream_t s) {
SCAMPError_t error;
if(window_size > tile_width) {
return SCAMP_DIM_INCOMPATIBLE;
}
if(window_size > tile_height) {
return SCAMP_DIM_INCOMPATIBLE;
}
error = fft_info->compute_QT(QT_scratch, timeseries_A, timeseries_B, means_B, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_upper(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp_type, full_join, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = fft_info->compute_QT(QT_scratch, timeseries_B, timeseries_A, means_A, s);
if(error != SCAMP_NO_ERROR) {
return error;
}
error = kernel_ab_join_lower(QT_scratch, timeseries_A, timeseries_B, df_A, df_B, dg_A, dg_B, norms_A, norms_B, profile_A, profile_B, window_size, tile_width - window_size + 1, tile_height - window_size + 1, tile_start_A, tile_start_B, global_start_A, global_start_B, props, fp_type, full_join, s);
if(error != SCAMP_NO_ERROR) {
printf("SCAMP error\n");
return error;
}
return SCAMP_NO_ERROR;
}
}
|
6d8b9fece3fa2aaab1b9e31a7943c590bcc19578.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<helper_cuda.h>
using std::cout;
using std::endl;
using std::cerr;
constexpr int manualBlockSize = 32;
__global__ void
square(int *array, int arrayCount){
extern __shared__ int dynamicSMem[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < arrayCount)
array[idx] *= array[idx];
}
double
reportPotentialOccupancy(void *kernel, int block, size_t dynamicSMem){
int device;
hipDeviceProp_t prop;
int nBlock;
int activeWarps;
int maxWarps;
double occupancy;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&prop,device));
checkCudaErrors(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&nBlock,
kernel,
block,
dynamicSMem
));
activeWarps = nBlock*block / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
occupancy = (double)activeWarps / maxWarps;
return occupancy;
}
int
launchConfig(int *array, int arrayCount, bool automatic){
int block;
int minGrid;
int grid;
size_t dynamicSMemUsage = 0;
hipEvent_t st;
hipEvent_t ed;
float elapsedTime;
double potentialOccupancy;
checkCudaErrors(hipEventCreate(&st));
checkCudaErrors(hipEventCreate(&ed));
if(automatic){
//Returns grid and block size that achieves maximum potential occupancy for a device function.
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(
&minGrid,
&block,
(void*)square,
dynamicSMemUsage,
arrayCount
));
cout<<"suggested block size: "<<block<<endl;
cout<<"minimum grid size for maximum occupancy: "<<minGrid<<endl;
cout<<"dynamic mem:"<<dynamicSMemUsage<<endl;
}else{
block = manualBlockSize;
}
grid = (arrayCount+ block-1)/block;
checkCudaErrors(hipEventRecord(st));
hipLaunchKernelGGL(( square), dim3(grid),dim3(block),dynamicSMemUsage, 0, array, arrayCount);
checkCudaErrors(hipEventRecord(ed));
checkCudaErrors(hipDeviceSynchronize());
potentialOccupancy = reportPotentialOccupancy((void*)square, block,dynamicSMemUsage);
cout<<"Potential occupancy: "<<potentialOccupancy*100<<"%"<<endl;
checkCudaErrors(hipEventElapsedTime(&elapsedTime, st, ed));
cout<<"Elapsed time: "<<elapsedTime<<" ms"<<endl;
return 0;
}
int
test(bool automaticLaunchConfig, int const count = 1000000){
int *array;
int *dArray;
int size = count*sizeof(int);
array = new int[count];
for(int i=0; i<count; i++)
array[i] = i;
checkCudaErrors(hipMalloc(&dArray, size));
checkCudaErrors(hipMemcpy(dArray,array,size,hipMemcpyHostToDevice));
for(int i=0; i<count; i++)
array[i]=0;
launchConfig(dArray, count, automaticLaunchConfig);
checkCudaErrors(hipMemcpy(array, dArray, size, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(dArray));
for(int i=0; i<count; i++)
if(array[i] != i*i){
cout<<"element:"<<i<<" expected:"<<i*i<<" actual:"<<array[i]<<endl;
return 1;
}
delete [] array;
return 0;
}
int
main(){
int status;
//----------
cout<<"starting simple occupancy"<<endl<<endl;
cout<<"[ manual configuration with "<<manualBlockSize
<<" threads per block ]"<<endl;
status = test(false);
if(status){
cerr<<"Test Failed"<<endl;
return -1;
}
//-----------
cout<<endl;
cout<<"[ Automic, occupancy-based configuration ]"<<endl;
status = test(true);
if(status){
cerr<<"Test Failed"<<endl;
return -1;
}
//----------
cout<<endl;
cout<<"Test PASSED"<<endl;
return 0;
}
|
6d8b9fece3fa2aaab1b9e31a7943c590bcc19578.cu
|
#include<iostream>
#include<helper_cuda.h>
using std::cout;
using std::endl;
using std::cerr;
constexpr int manualBlockSize = 32;
__global__ void
square(int *array, int arrayCount){
extern __shared__ int dynamicSMem[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < arrayCount)
array[idx] *= array[idx];
}
double
reportPotentialOccupancy(void *kernel, int block, size_t dynamicSMem){
int device;
cudaDeviceProp prop;
int nBlock;
int activeWarps;
int maxWarps;
double occupancy;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&prop,device));
checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&nBlock,
kernel,
block,
dynamicSMem
));
activeWarps = nBlock*block / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
occupancy = (double)activeWarps / maxWarps;
return occupancy;
}
int
launchConfig(int *array, int arrayCount, bool automatic){
int block;
int minGrid;
int grid;
size_t dynamicSMemUsage = 0;
cudaEvent_t st;
cudaEvent_t ed;
float elapsedTime;
double potentialOccupancy;
checkCudaErrors(cudaEventCreate(&st));
checkCudaErrors(cudaEventCreate(&ed));
if(automatic){
//Returns grid and block size that achieves maximum potential occupancy for a device function.
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(
&minGrid,
&block,
(void*)square,
dynamicSMemUsage,
arrayCount
));
cout<<"suggested block size: "<<block<<endl;
cout<<"minimum grid size for maximum occupancy: "<<minGrid<<endl;
cout<<"dynamic mem:"<<dynamicSMemUsage<<endl;
}else{
block = manualBlockSize;
}
grid = (arrayCount+ block-1)/block;
checkCudaErrors(cudaEventRecord(st));
square<<<grid,block,dynamicSMemUsage>>>(array, arrayCount);
checkCudaErrors(cudaEventRecord(ed));
checkCudaErrors(cudaDeviceSynchronize());
potentialOccupancy = reportPotentialOccupancy((void*)square, block,dynamicSMemUsage);
cout<<"Potential occupancy: "<<potentialOccupancy*100<<"%"<<endl;
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, st, ed));
cout<<"Elapsed time: "<<elapsedTime<<" ms"<<endl;
return 0;
}
int
test(bool automaticLaunchConfig, int const count = 1000000){
int *array;
int *dArray;
int size = count*sizeof(int);
array = new int[count];
for(int i=0; i<count; i++)
array[i] = i;
checkCudaErrors(cudaMalloc(&dArray, size));
checkCudaErrors(cudaMemcpy(dArray,array,size,cudaMemcpyHostToDevice));
for(int i=0; i<count; i++)
array[i]=0;
launchConfig(dArray, count, automaticLaunchConfig);
checkCudaErrors(cudaMemcpy(array, dArray, size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(dArray));
for(int i=0; i<count; i++)
if(array[i] != i*i){
cout<<"element:"<<i<<" expected:"<<i*i<<" actual:"<<array[i]<<endl;
return 1;
}
delete [] array;
return 0;
}
int
main(){
int status;
//----------
cout<<"starting simple occupancy"<<endl<<endl;
cout<<"[ manual configuration with "<<manualBlockSize
<<" threads per block ]"<<endl;
status = test(false);
if(status){
cerr<<"Test Failed"<<endl;
return -1;
}
//-----------
cout<<endl;
cout<<"[ Automic, occupancy-based configuration ]"<<endl;
status = test(true);
if(status){
cerr<<"Test Failed"<<endl;
return -1;
}
//----------
cout<<endl;
cout<<"Test PASSED"<<endl;
return 0;
}
|
b676ae98a2f1c1def8cd00ab760180164cd362ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
__global__
void jacobi_seq(double *** u, double *** u_old, double *** f, int N) {
double p= 1.0/6.0;
int i,j,k=0;
double delta=2.0/(N+1);
for(i=1;i<=N;i++){
for(j=1;j<=N;j++){
for(k=1;k<=N;k++){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
}
}
}
}
__global__
void jacobi_nat(double *** u, double *** u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1; // remember +1!!
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
}
__inline__ __device__
double warpReduceSum(double value){
 // Start at half the warp width: a shuffle delta of 32 reads an out-of-range lane,
 // which returns the calling thread's own value and would double-count it.
 for (int i = 16; i > 0; i /= 2)
 value += __shfl_down_sync(0xffffffffu, value, i);
return value;
}
__global__
void jacobi_nat_with_norm(double ***u, double ***u_old, double ***f, int N, double *d) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
*d=0.0;
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
//printf("%d %d %d\n", i,j,k);
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
double value = (u[i][j][k]-u_old[i][j][k])*(u[i][j][k]-u_old[i][j][k]);
//printf("%lf ", value);
// value = warpReduceSum(value);
// if (threadIdx.x % 32 == 0)
atomicAdd(d, value);
}
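// jacobi_gpu0 / jacobi_gpu1 each sweep one half of the domain (split along k); the shared
// boundary plane is read from the other GPU's u_old through the peer_u_old pointer.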
__global__
void jacobi_gpu0(double *** u, double *** u_old, double *** peer_u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
if(k==N/2){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + peer_u_old[i][j][0]
+delta*delta*f[i][j][k];
}
else{
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
u[i][j][k] *= p;
}
__global__
void jacobi_gpu1(double *** u, double *** u_old, double *** peer_u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
if(k==1){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+peer_u_old[i][j][N/2] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
else{
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
u[i][j][k] *= p;
}
|
b676ae98a2f1c1def8cd00ab760180164cd362ac.cu
|
/* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
__global__
void jacobi_seq(double *** u, double *** u_old, double *** f, int N) {
double p= 1.0/6.0;
int i,j,k=0;
double delta=2.0/(N+1);
for(i=1;i<=N;i++){
for(j=1;j<=N;j++){
for(k=1;k<=N;k++){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
}
}
}
}
__global__
void jacobi_nat(double *** u, double *** u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1; // remember +1!!
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
}
__inline__ __device__
double warpReduceSum(double value){
 // Start at half the warp width: a shuffle delta of 32 reads an out-of-range lane,
 // which returns the calling thread's own value and would double-count it.
 for (int i = 16; i > 0; i /= 2)
 value += __shfl_down_sync(0xffffffffu, value, i);
return value;
}
__global__
void jacobi_nat_with_norm(double ***u, double ***u_old, double ***f, int N, double *d) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
*d=0.0;
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
//printf("%d %d %d\n", i,j,k);
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
u[i][j][k] *= p;
double value = (u[i][j][k]-u_old[i][j][k])*(u[i][j][k]-u_old[i][j][k]);
//printf("%lf ", value);
// value = warpReduceSum(value);
// if (threadIdx.x % 32 == 0)
atomicAdd(d, value);
}
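// jacobi_gpu0 / jacobi_gpu1 each sweep one half of the domain (split along k); the shared
// boundary plane is read from the other GPU's u_old through the peer_u_old pointer.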
__global__
void jacobi_gpu0(double *** u, double *** u_old, double *** peer_u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
if(k==N/2){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + peer_u_old[i][j][0]
+delta*delta*f[i][j][k];
}
else{
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
u[i][j][k] *= p;
}
__global__
void jacobi_gpu1(double *** u, double *** u_old, double *** peer_u_old, double *** f, int N) {
double p= 1.0/6.0;
double delta=2.0/(N+1);
int i = (blockIdx.x * blockDim.x + threadIdx.x)+1;
int j = (blockIdx.y * blockDim.y + threadIdx.y)+1;
int k = (blockIdx.z * blockDim.z + threadIdx.z)+1;
if(k==1){
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+peer_u_old[i][j][N/2] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
else{
u[i][j][k] = u_old[i-1][j][k] + u_old[i+1][j][k]
+u_old[i][j-1][k] + u_old[i][j+1][k]
+u_old[i][j][k-1] + u_old[i][j][k+1]
+delta*delta*f[i][j][k];
}
u[i][j][k] *= p;
}
|
c01d5c6e2be93542b985daa626c3651d22fdfee6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <decisiontree/decisiontree_impl.h>
#include <gtest/gtest.h>
#include <linalg/gemv.h>
#include <linalg/transpose.h>
#include <sys/stat.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/c_api_runtime.h>
#include <cstdlib>
#include <cuda_utils.cuh>
#include <cuml/ensemble/randomforest.hpp>
#include <fstream>
#include <iostream>
#include <limits>
#include <random/rng.cuh>
#include <string>
namespace ML {
using namespace MLCommon;
template <typename T> // template useless for now.
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float rows_sample;
int n_inference_rows;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_rows_per_node;
float min_impurity_decrease;
int n_streams;
CRITERION split_criterion;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const RfInputs<T> &dims) {
return os;
}
template <typename T, typename L>
class RfTreeliteTestCommon : public ::testing::TestWithParam<RfInputs<T>> {
protected:
void ConcatenateTreeliteModels() {
// Test the implementation for converting fitted forest into treelite format.
ModelHandle concatenated_forest_handle;
concatenated_forest_handle = concatenate_trees(treelite_indiv_handles);
compare_concat_forest_to_subforests(concatenated_forest_handle,
treelite_indiv_handles);
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
// Get the test index from Google current_test_info.
// The test index is the string after '/' in test_name.
std::string index_str =
test_name.substr(test_name.find("/") + 1, test_name.length());
// Create a directory if the test is the first one in the test case.
int mkdir_ret = mkdir(test_dir.c_str(), 0700);
if (mkdir_ret != 0) {
// Ignore the error if the error is caused by EEXIST.
// Treelite will generate errors when the directory is not accessible.
ASSERT(errno == EEXIST, "Call mkdir %s fails.", test_dir.c_str());
}
// Create a sub-directory for the test case.
dir_name = test_dir + index_str;
CompilerHandle compiler;
// "ast_navive" is the default compiler treelite used in their Python code.
TREELITE_CHECK(TreeliteCompilerCreate("ast_native", &compiler));
int verbose = 0;
// Generate C code in the directory specified below.
    // Parallel compilation is disabled. To enable it, one needs to specify parallel_comp of CompilerHandle.
// Treelite will create a directory if it doesn't exist.
TREELITE_CHECK(TreeliteCompilerGenerateCode(
compiler, treelite_indiv_handles[0], verbose, dir_name.c_str()));
TREELITE_CHECK(TreeliteCompilerFree(compiler));
// Options copied from
// https://github.com/dmlc/treelite/blob/528d883f8f39eb5dd633e929b95915b63e210b39/python/treelite/contrib/__init__.py.
std::string obj_cmd = "gcc -c -O3 -o " + dir_name + "/main.o " + dir_name +
"/main.c -fPIC "
"-std=c99 -lm";
std::string lib_cmd = "gcc -shared -O3 -o " + dir_name +
"/treelite_model.so " + dir_name +
"/main.o -std=c99 -lm";
ASSERT(system(obj_cmd.c_str()) == 0, "Call %s fails.", obj_cmd.c_str());
ASSERT(system(lib_cmd.c_str()) == 0, "Call %s fails.", lib_cmd.c_str());
PredictorHandle predictor;
std::string lib_path = dir_name + "/treelite_model.so";
// -1 means use maximum possible worker threads.
int worker_thread = -1;
TREELITE_CHECK(
TreelitePredictorLoad(lib_path.c_str(), worker_thread, &predictor));
DenseBatchHandle dense_batch;
    // Current RF doesn't seem to support missing values, so put NaN to be safe.
float missing_value = std::numeric_limits<double>::quiet_NaN();
TREELITE_CHECK(TreeliteAssembleDenseBatch(
inference_data_h.data(), missing_value, params.n_inference_rows,
params.n_cols, &dense_batch));
// Use dense batch so batch_sparse is 0.
// pred_margin = true means to produce raw margins rather than transformed probability.
int batch_sparse = 0;
bool pred_margin = false;
    // Allocate a larger array for the treelite predicted labels when using multi-class classification
    // to avoid seg faults. Although later we only use the first params.n_inference_rows elements.
size_t treelite_predicted_labels_size;
TREELITE_CHECK(TreelitePredictorPredictBatch(
predictor, dense_batch, batch_sparse, verbose, pred_margin,
treelite_predicted_labels.data(), &treelite_predicted_labels_size));
TREELITE_CHECK(TreeliteDeleteDenseBatch(dense_batch));
TREELITE_CHECK(TreelitePredictorFree(predictor));
TREELITE_CHECK(TreeliteFreeModel(concatenated_forest_handle));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[0]));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[1]));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[2]));
}
void getResultAndCheck() {
// Predict and compare against known labels
predict(*handle, forest, inference_data_d, params.n_inference_rows,
params.n_cols, predicted_labels_d);
RF_metrics tmp = score(*handle, forest, labels_d, params.n_inference_rows,
predicted_labels_d);
CUDA_CHECK(hipStreamSynchronize(stream));
predicted_labels_h.resize(params.n_inference_rows);
ref_predicted_labels.resize(params.n_inference_rows);
updateHost(predicted_labels_h.data(), predicted_labels_d,
params.n_inference_rows, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int i = 0; i < params.n_inference_rows; i++) {
if (is_classification) {
ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
treelite_predicted_labels[i] =
treelite_predicted_labels[i] >= 0.5 ? 1 : 0;
} else {
ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
}
}
EXPECT_TRUE(devArrMatchHost(
ref_predicted_labels.data(), treelite_predicted_labels.data(),
params.n_inference_rows, Compare<float>(), stream));
}
void SetUp() override {
params = ::testing::TestWithParam<RfInputs<T>>::GetParam();
DecisionTree::DecisionTreeParams tree_params;
set_tree_params(tree_params, params.max_depth, params.max_leaves,
params.max_features, params.n_bins, params.split_algo,
params.min_rows_per_node, params.min_impurity_decrease,
params.bootstrap_features, params.split_criterion, false);
set_all_rf_params(rf_params, params.n_trees, params.bootstrap,
params.rows_sample, -1, params.n_streams, tree_params);
handle.reset(new cumlHandle(rf_params.n_streams));
data_len = params.n_rows * params.n_cols;
inference_data_len = params.n_inference_rows * params.n_cols;
allocate(data_d, data_len);
allocate(inference_data_d, inference_data_len);
allocate(labels_d, params.n_rows);
allocate(predicted_labels_d, params.n_inference_rows);
treelite_predicted_labels.resize(params.n_inference_rows);
ref_predicted_labels.resize(params.n_inference_rows);
CUDA_CHECK(hipStreamCreate(&stream));
handle->setStream(stream);
forest = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest);
forest_2 = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest_2);
forest_3 = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest_3);
all_forest_info = {forest, forest_2, forest_3};
data_h.resize(data_len);
inference_data_h.resize(inference_data_len);
// Random number generator.
Random::Rng r1(1234ULL);
// Generate data_d is in column major order.
r1.uniform(data_d, data_len, T(0.0), T(10.0), stream);
Random::Rng r2(4321ULL);
// Generate inference_data_d which is in row major order.
r2.uniform(inference_data_d, inference_data_len, T(0.0), T(10.0), stream);
updateHost(data_h.data(), data_d, data_len, stream);
updateHost(inference_data_h.data(), inference_data_d, inference_data_len,
stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(data_d));
CUDA_CHECK(hipFree(inference_data_d));
CUDA_CHECK(hipFree(labels_d));
CUDA_CHECK(hipFree(predicted_labels_d));
delete forest;
delete forest_2;
delete forest_3;
all_forest_info.clear();
labels_h.clear();
predicted_labels_h.clear();
data_h.clear();
inference_data_h.clear();
treelite_predicted_labels.clear();
ref_predicted_labels.clear();
treelite_indiv_handles.clear();
}
protected:
RfInputs<T> params;
RF_params rf_params;
T *data_d, *inference_data_d;
std::vector<T> data_h;
std::vector<T> inference_data_h;
std::vector<ModelHandle> treelite_indiv_handles;
// Set to 1 for regression and 2 for binary classification
// #class for multi-classification
int task_category;
int is_classification;
int data_len;
int inference_data_len;
hipStream_t stream;
std::shared_ptr<cumlHandle> handle;
std::vector<float> treelite_predicted_labels;
std::vector<float> ref_predicted_labels;
std::vector<ML::RandomForestMetaData<T, L> *> all_forest_info;
std::string test_dir;
std::string dir_name;
L *labels_d, *predicted_labels_d;
std::vector<L> labels_h;
std::vector<L> predicted_labels_h;
RandomForestMetaData<T, L> *forest;
RandomForestMetaData<T, L> *forest_2;
RandomForestMetaData<T, L> *forest_3;
}; // namespace ML
template <typename T, typename L>
class RfConcatTestClf : public RfTreeliteTestCommon<T, L> {
protected:
void testClassifier() {
this->test_dir = "./concat_test_clf/";
this->is_classification = 1;
//task_category - 1 for regression, 2 for binary classification
// #class for multi-class classification
this->task_category = 2;
float *weight, *temp_label_d, *temp_data_d;
std::vector<float> temp_label_h;
allocate(weight, this->params.n_cols);
allocate(temp_label_d, this->params.n_rows);
allocate(temp_data_d, this->data_len);
Random::Rng r(1234ULL);
// Generate weight for each feature.
r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream);
// Generate noise.
r.uniform(temp_label_d, this->params.n_rows, T(0.0), T(10.0), this->stream);
LinAlg::transpose<float>(
this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols,
this->handle->getImpl().getCublasHandle(), this->stream);
LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows,
weight, temp_label_d, true, 1.f, 1.f,
this->handle->getImpl().getCublasHandle(),
this->stream);
temp_label_h.resize(this->params.n_rows);
updateHost(temp_label_h.data(), temp_label_d, this->params.n_rows,
this->stream);
CUDA_CHECK(hipStreamSynchronize(this->stream));
int value;
for (int i = 0; i < this->params.n_rows; i++) {
// The value of temp_label is between 0 and 10*n_cols + noise_level (10).
// Choose half of that as the threshold to balance the two classes.
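// For example, with n_cols = 2 (as in the test inputs below) the threshold is (10 * 2 + 10) / 2 = 15.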
if (temp_label_h[i] >= (10 * this->params.n_cols + 10) / 2.0) {
value = 1;
} else {
value = 0;
}
this->labels_h.push_back(value);
}
updateDevice(this->labels_d, this->labels_h.data(), this->params.n_rows,
this->stream);
preprocess_labels(this->params.n_rows, this->labels_h, labels_map);
for (int i = 0; i < 3; i++) {
ModelHandle model;
this->rf_params.n_trees = this->rf_params.n_trees + i;
fit(*(this->handle), this->all_forest_info[i], this->data_d,
this->params.n_rows, this->params.n_cols, this->labels_d,
labels_map.size(), this->rf_params);
build_treelite_forest(&model, this->all_forest_info[i],
this->params.n_cols, this->task_category);
this->treelite_indiv_handles.push_back(model);
}
CUDA_CHECK(hipStreamSynchronize(this->stream));
this->ConcatenateTreeliteModels();
this->getResultAndCheck();
postprocess_labels(this->params.n_rows, this->labels_h, this->labels_map);
labels_map.clear();
temp_label_h.clear();
CUDA_CHECK(hipFree(weight));
CUDA_CHECK(hipFree(temp_label_d));
CUDA_CHECK(hipFree(temp_data_d));
}
protected:
std::map<int, int>
labels_map; //unique map of labels to int vals starting from 0
};
//-------------------------------------------------------------------------------------------------------------------------------------
template <typename T, typename L>
class RfConcatTestReg : public RfTreeliteTestCommon<T, L> {
protected:
void testRegressor() {
this->test_dir = "./concat_test_reg/";
this->is_classification = 0;
// task_category - 1 for regression, 2 for binary classification
// #class for multi-class classification
this->task_category = 1;
float *weight, *temp_data_d;
allocate(weight, this->params.n_cols);
allocate(temp_data_d, this->data_len);
Random::Rng r(1234ULL);
// Generate weight for each feature.
r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream);
// Generate noise.
r.uniform(this->labels_d, this->params.n_rows, T(0.0), T(10.0),
this->stream);
LinAlg::transpose<float>(
this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols,
this->handle->getImpl().getCublasHandle(), this->stream);
LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows,
weight, this->labels_d, true, 1.f, 1.f,
this->handle->getImpl().getCublasHandle(),
this->stream);
this->labels_h.resize(this->params.n_rows);
updateHost(this->labels_h.data(), this->labels_d, this->params.n_rows,
this->stream);
CUDA_CHECK(hipStreamSynchronize(this->stream));
for (int i = 0; i < 3; i++) {
ModelHandle model;
this->rf_params.n_trees = this->rf_params.n_trees + i;
fit(*(this->handle), this->all_forest_info[i], this->data_d,
this->params.n_rows, this->params.n_cols, this->labels_d,
this->rf_params);
build_treelite_forest(&model, this->all_forest_info[i],
this->params.n_cols, this->task_category);
CUDA_CHECK(hipStreamSynchronize(this->stream));
this->treelite_indiv_handles.push_back(model);
}
this->ConcatenateTreeliteModels();
this->getResultAndCheck();
CUDA_CHECK(hipFree(weight));
CUDA_CHECK(hipFree(temp_data_d));
}
};
// //-------------------------------------------------------------------------------------------------------------------------------------
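// Note: each brace-initializer below maps positionally to the RfInputs fields:
// {n_rows, n_cols, n_trees, max_features, rows_sample, n_inference_rows, max_depth, max_leaves,
//  bootstrap, bootstrap_features, n_bins, split_algo, min_rows_per_node, min_impurity_decrease,
//  n_streams, split_criterion}.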
const std::vector<RfInputs<float>> inputsf2_clf = {
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling)
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2,
CRITERION::
CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2, CRITERION::ENTROPY}};
typedef RfConcatTestClf<float, int> RfClassifierConcatTestF;
TEST_P(RfClassifierConcatTestF, Convert_Clf) { testClassifier(); }
INSTANTIATE_TEST_CASE_P(RfBinaryClassifierConcatTests, RfClassifierConcatTestF,
::testing::ValuesIn(inputsf2_clf));
const std::vector<RfInputs<float>> inputsf2_reg = {
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MSE},
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MSE},
{4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression)
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MAE},
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2, CRITERION::MAE},
{4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::CRITERION_END}};
typedef RfConcatTestReg<float, float> RfRegressorConcatTestF;
TEST_P(RfRegressorConcatTestF, Convert_Reg) { testRegressor(); }
INSTANTIATE_TEST_CASE_P(RfRegressorConcatTests, RfRegressorConcatTestF,
::testing::ValuesIn(inputsf2_reg));
} // end namespace ML
|
c01d5c6e2be93542b985daa626c3651d22fdfee6.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <decisiontree/decisiontree_impl.h>
#include <gtest/gtest.h>
#include <linalg/gemv.h>
#include <linalg/transpose.h>
#include <sys/stat.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/c_api_runtime.h>
#include <cstdlib>
#include <cuda_utils.cuh>
#include <cuml/ensemble/randomforest.hpp>
#include <fstream>
#include <iostream>
#include <limits>
#include <random/rng.cuh>
#include <string>
namespace ML {
using namespace MLCommon;
template <typename T> // template useless for now.
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float rows_sample;
int n_inference_rows;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_rows_per_node;
float min_impurity_decrease;
int n_streams;
CRITERION split_criterion;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const RfInputs<T> &dims) {
return os;
}
template <typename T, typename L>
class RfTreeliteTestCommon : public ::testing::TestWithParam<RfInputs<T>> {
protected:
void ConcatenateTreeliteModels() {
// Test the implementation for converting fitted forest into treelite format.
ModelHandle concatenated_forest_handle;
concatenated_forest_handle = concatenate_trees(treelite_indiv_handles);
compare_concat_forest_to_subforests(concatenated_forest_handle,
treelite_indiv_handles);
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
// Get the test index from Google current_test_info.
// The test index is the string after '/' in test_name.
std::string index_str =
test_name.substr(test_name.find("/") + 1, test_name.length());
// Create a directory if the test is the first one in the test case.
int mkdir_ret = mkdir(test_dir.c_str(), 0700);
if (mkdir_ret != 0) {
// Ignore the error if the error is caused by EEXIST.
// Treelite will generate errors when the directory is not accessible.
ASSERT(errno == EEXIST, "Call mkdir %s fails.", test_dir.c_str());
}
// Create a sub-directory for the test case.
dir_name = test_dir + index_str;
CompilerHandle compiler;
// "ast_navive" is the default compiler treelite used in their Python code.
TREELITE_CHECK(TreeliteCompilerCreate("ast_native", &compiler));
int verbose = 0;
// Generate C code in the directory specified below.
// Parallel compilation is disabled. To enable it, one needs to specify parallel_comp of CompilerHandle.
// Treelite will create a directory if it doesn't exist.
TREELITE_CHECK(TreeliteCompilerGenerateCode(
compiler, treelite_indiv_handles[0], verbose, dir_name.c_str()));
TREELITE_CHECK(TreeliteCompilerFree(compiler));
// Options copied from
// https://github.com/dmlc/treelite/blob/528d883f8f39eb5dd633e929b95915b63e210b39/python/treelite/contrib/__init__.py.
std::string obj_cmd = "gcc -c -O3 -o " + dir_name + "/main.o " + dir_name +
"/main.c -fPIC "
"-std=c99 -lm";
std::string lib_cmd = "gcc -shared -O3 -o " + dir_name +
"/treelite_model.so " + dir_name +
"/main.o -std=c99 -lm";
ASSERT(system(obj_cmd.c_str()) == 0, "Call %s fails.", obj_cmd.c_str());
ASSERT(system(lib_cmd.c_str()) == 0, "Call %s fails.", lib_cmd.c_str());
PredictorHandle predictor;
std::string lib_path = dir_name + "/treelite_model.so";
// -1 means use maximum possible worker threads.
int worker_thread = -1;
TREELITE_CHECK(
TreelitePredictorLoad(lib_path.c_str(), worker_thread, &predictor));
DenseBatchHandle dense_batch;
// Current RF doesn't seem to support missing values; use NaN to be safe.
float missing_value = std::numeric_limits<double>::quiet_NaN();
TREELITE_CHECK(TreeliteAssembleDenseBatch(
inference_data_h.data(), missing_value, params.n_inference_rows,
params.n_cols, &dense_batch));
// Use dense batch so batch_sparse is 0.
// pred_margin = true means to produce raw margins rather than transformed probability.
int batch_sparse = 0;
bool pred_margin = false;
// Allocate a larger array for treelite predicted labels when using multi-class classification to avoid seg faults,
// although later we only use the first params.n_inference_rows elements.
size_t treelite_predicted_labels_size;
TREELITE_CHECK(TreelitePredictorPredictBatch(
predictor, dense_batch, batch_sparse, verbose, pred_margin,
treelite_predicted_labels.data(), &treelite_predicted_labels_size));
TREELITE_CHECK(TreeliteDeleteDenseBatch(dense_batch));
TREELITE_CHECK(TreelitePredictorFree(predictor));
TREELITE_CHECK(TreeliteFreeModel(concatenated_forest_handle));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[0]));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[1]));
TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[2]));
}
void getResultAndCheck() {
// Predict and compare against known labels
predict(*handle, forest, inference_data_d, params.n_inference_rows,
params.n_cols, predicted_labels_d);
RF_metrics tmp = score(*handle, forest, labels_d, params.n_inference_rows,
predicted_labels_d);
CUDA_CHECK(cudaStreamSynchronize(stream));
predicted_labels_h.resize(params.n_inference_rows);
ref_predicted_labels.resize(params.n_inference_rows);
updateHost(predicted_labels_h.data(), predicted_labels_d,
params.n_inference_rows, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int i = 0; i < params.n_inference_rows; i++) {
if (is_classification) {
ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
treelite_predicted_labels[i] =
treelite_predicted_labels[i] >= 0.5 ? 1 : 0;
} else {
ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
}
}
EXPECT_TRUE(devArrMatchHost(
ref_predicted_labels.data(), treelite_predicted_labels.data(),
params.n_inference_rows, Compare<float>(), stream));
}
void SetUp() override {
params = ::testing::TestWithParam<RfInputs<T>>::GetParam();
DecisionTree::DecisionTreeParams tree_params;
set_tree_params(tree_params, params.max_depth, params.max_leaves,
params.max_features, params.n_bins, params.split_algo,
params.min_rows_per_node, params.min_impurity_decrease,
params.bootstrap_features, params.split_criterion, false);
set_all_rf_params(rf_params, params.n_trees, params.bootstrap,
params.rows_sample, -1, params.n_streams, tree_params);
handle.reset(new cumlHandle(rf_params.n_streams));
data_len = params.n_rows * params.n_cols;
inference_data_len = params.n_inference_rows * params.n_cols;
allocate(data_d, data_len);
allocate(inference_data_d, inference_data_len);
allocate(labels_d, params.n_rows);
allocate(predicted_labels_d, params.n_inference_rows);
treelite_predicted_labels.resize(params.n_inference_rows);
ref_predicted_labels.resize(params.n_inference_rows);
CUDA_CHECK(cudaStreamCreate(&stream));
handle->setStream(stream);
forest = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest);
forest_2 = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest_2);
forest_3 = new typename ML::RandomForestMetaData<T, L>;
null_trees_ptr(forest_3);
all_forest_info = {forest, forest_2, forest_3};
data_h.resize(data_len);
inference_data_h.resize(inference_data_len);
// Random number generator.
Random::Rng r1(1234ULL);
// Generate data_d is in column major order.
r1.uniform(data_d, data_len, T(0.0), T(10.0), stream);
Random::Rng r2(4321ULL);
// Generate inference_data_d which is in row major order.
r2.uniform(inference_data_d, inference_data_len, T(0.0), T(10.0), stream);
updateHost(data_h.data(), data_d, data_len, stream);
updateHost(inference_data_h.data(), inference_data_d, inference_data_len,
stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(data_d));
CUDA_CHECK(cudaFree(inference_data_d));
CUDA_CHECK(cudaFree(labels_d));
CUDA_CHECK(cudaFree(predicted_labels_d));
delete forest;
delete forest_2;
delete forest_3;
all_forest_info.clear();
labels_h.clear();
predicted_labels_h.clear();
data_h.clear();
inference_data_h.clear();
treelite_predicted_labels.clear();
ref_predicted_labels.clear();
treelite_indiv_handles.clear();
}
protected:
RfInputs<T> params;
RF_params rf_params;
T *data_d, *inference_data_d;
std::vector<T> data_h;
std::vector<T> inference_data_h;
std::vector<ModelHandle> treelite_indiv_handles;
// Set to 1 for regression, 2 for binary classification,
// and to the number of classes for multi-class classification
int task_category;
int is_classification;
int data_len;
int inference_data_len;
cudaStream_t stream;
std::shared_ptr<cumlHandle> handle;
std::vector<float> treelite_predicted_labels;
std::vector<float> ref_predicted_labels;
std::vector<ML::RandomForestMetaData<T, L> *> all_forest_info;
std::string test_dir;
std::string dir_name;
L *labels_d, *predicted_labels_d;
std::vector<L> labels_h;
std::vector<L> predicted_labels_h;
RandomForestMetaData<T, L> *forest;
RandomForestMetaData<T, L> *forest_2;
RandomForestMetaData<T, L> *forest_3;
}; // end of class RfTreeliteTestCommon
template <typename T, typename L>
class RfConcatTestClf : public RfTreeliteTestCommon<T, L> {
protected:
void testClassifier() {
this->test_dir = "./concat_test_clf/";
this->is_classification = 1;
//task_category - 1 for regression, 2 for binary classification
// #class for multi-class classification
this->task_category = 2;
float *weight, *temp_label_d, *temp_data_d;
std::vector<float> temp_label_h;
allocate(weight, this->params.n_cols);
allocate(temp_label_d, this->params.n_rows);
allocate(temp_data_d, this->data_len);
Random::Rng r(1234ULL);
// Generate weight for each feature.
r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream);
// Generate noise.
r.uniform(temp_label_d, this->params.n_rows, T(0.0), T(10.0), this->stream);
LinAlg::transpose<float>(
this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols,
this->handle->getImpl().getCublasHandle(), this->stream);
LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows,
weight, temp_label_d, true, 1.f, 1.f,
this->handle->getImpl().getCublasHandle(),
this->stream);
temp_label_h.resize(this->params.n_rows);
updateHost(temp_label_h.data(), temp_label_d, this->params.n_rows,
this->stream);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
int value;
for (int i = 0; i < this->params.n_rows; i++) {
// The value of temp_label is between 0 and 10*n_cols + noise_level (10).
// Choose half of that as the threshold to balance the two classes.
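// For example, with n_cols = 2 (as in the test inputs below) the threshold is (10 * 2 + 10) / 2 = 15.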
if (temp_label_h[i] >= (10 * this->params.n_cols + 10) / 2.0) {
value = 1;
} else {
value = 0;
}
this->labels_h.push_back(value);
}
updateDevice(this->labels_d, this->labels_h.data(), this->params.n_rows,
this->stream);
preprocess_labels(this->params.n_rows, this->labels_h, labels_map);
for (int i = 0; i < 3; i++) {
ModelHandle model;
this->rf_params.n_trees = this->rf_params.n_trees + i;
fit(*(this->handle), this->all_forest_info[i], this->data_d,
this->params.n_rows, this->params.n_cols, this->labels_d,
labels_map.size(), this->rf_params);
build_treelite_forest(&model, this->all_forest_info[i],
this->params.n_cols, this->task_category);
this->treelite_indiv_handles.push_back(model);
}
CUDA_CHECK(cudaStreamSynchronize(this->stream));
this->ConcatenateTreeliteModels();
this->getResultAndCheck();
postprocess_labels(this->params.n_rows, this->labels_h, this->labels_map);
labels_map.clear();
temp_label_h.clear();
CUDA_CHECK(cudaFree(weight));
CUDA_CHECK(cudaFree(temp_label_d));
CUDA_CHECK(cudaFree(temp_data_d));
}
protected:
std::map<int, int>
labels_map; //unique map of labels to int vals starting from 0
};
//-------------------------------------------------------------------------------------------------------------------------------------
template <typename T, typename L>
class RfConcatTestReg : public RfTreeliteTestCommon<T, L> {
protected:
void testRegressor() {
this->test_dir = "./concat_test_reg/";
this->is_classification = 0;
// task_category - 1 for regression, 2 for binary classification
// #class for multi-class classification
this->task_category = 1;
float *weight, *temp_data_d;
allocate(weight, this->params.n_cols);
allocate(temp_data_d, this->data_len);
Random::Rng r(1234ULL);
// Generate weight for each feature.
r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream);
// Generate noise.
r.uniform(this->labels_d, this->params.n_rows, T(0.0), T(10.0),
this->stream);
LinAlg::transpose<float>(
this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols,
this->handle->getImpl().getCublasHandle(), this->stream);
LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows,
weight, this->labels_d, true, 1.f, 1.f,
this->handle->getImpl().getCublasHandle(),
this->stream);
this->labels_h.resize(this->params.n_rows);
updateHost(this->labels_h.data(), this->labels_d, this->params.n_rows,
this->stream);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
for (int i = 0; i < 3; i++) {
ModelHandle model;
this->rf_params.n_trees = this->rf_params.n_trees + i;
fit(*(this->handle), this->all_forest_info[i], this->data_d,
this->params.n_rows, this->params.n_cols, this->labels_d,
this->rf_params);
build_treelite_forest(&model, this->all_forest_info[i],
this->params.n_cols, this->task_category);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
this->treelite_indiv_handles.push_back(model);
}
this->ConcatenateTreeliteModels();
this->getResultAndCheck();
CUDA_CHECK(cudaFree(weight));
CUDA_CHECK(cudaFree(temp_data_d));
}
};
// //-------------------------------------------------------------------------------------------------------------------------------------
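// Note: each brace-initializer below maps positionally to the RfInputs fields:
// {n_rows, n_cols, n_trees, max_features, rows_sample, n_inference_rows, max_depth, max_leaves,
//  bootstrap, bootstrap_features, n_bins, split_algo, min_rows_per_node, min_impurity_decrease,
//  n_streams, split_criterion}.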
const std::vector<RfInputs<float>> inputsf2_clf = {
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling)
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2,
CRITERION::
CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::ENTROPY},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2, CRITERION::ENTROPY}};
typedef RfConcatTestClf<float, int> RfClassifierConcatTestF;
TEST_P(RfClassifierConcatTestF, Convert_Clf) { testClassifier(); }
INSTANTIATE_TEST_CASE_P(RfBinaryClassifierConcatTests, RfClassifierConcatTestF,
::testing::ValuesIn(inputsf2_clf));
const std::vector<RfInputs<float>> inputsf2_reg = {
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MSE},
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MSE},
{4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::
CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression)
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::MAE},
{4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 0.0, 2, CRITERION::MAE},
{4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2,
CRITERION::CRITERION_END}};
typedef RfConcatTestReg<float, float> RfRegressorConcatTestF;
TEST_P(RfRegressorConcatTestF, Convert_Reg) { testRegressor(); }
INSTANTIATE_TEST_CASE_P(RfRegressorConcatTests, RfRegressorConcatTestF,
::testing::ValuesIn(inputsf2_reg));
} // end namespace ML
|
9272b66c6fc0ccc4ed3db69ae562fa70a89f7c29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <sstream>
#include <multithreading.h>
// DEPRECATED since CUDA 5.0
//#include <cutil.h>
//#include <cutil_inline.h>
//#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_timer.h>
#include "crackkernel.cu"
#include "entry.cpp"
#include "zip.c"
#include "pbkdf2.cpp"
using namespace std;
dim3 threads(128);
dim3 blocks(12,10);
int threadcount = threads.x*blocks.x*blocks.y;
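// With the launch configuration above this is 128 * 12 * 10 = 15360 threads (one candidate password per thread per launch).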
//Fixed width used to split each row into columns.
int width = 80;
unsigned char extension,in[CHUNK],found=0;
/*Variables describing the encrypted file:
- array holding the PVV (password verification value)
- array holding the salt
*/
int dkLen,saltLen,len;
int stored_pvv[2];
int S[16];
/*For multi-GPU use: query how many CUDA-capable GPUs are available for computation*/
const int MAX_GPU_COUNT = 8;
int GPU_N;
CUTThread threadID[MAX_GPU_COUNT];
TGPUplan plan[MAX_GPU_COUNT];
char *emptyArr;
char temp2[20];
float time_parallel = 0;
//Holds the words of the dictionary
char hostArray[869229][80];
/*End*/
fcrypt_ctx h_zcx[1];
/*Variables holding the grammar information*/
int v_prev, v_curr;
int Sk[33];
/*End of grammar variables*/
void initHost(){
/*Display the number of CUDA-capable GPUs*/
checkCudaErrors(hipGetDeviceCount(&GPU_N));
if(GPU_N > MAX_GPU_COUNT) GPU_N = MAX_GPU_COUNT;
printf("\nCUDA-capable device count: %i\n", GPU_N);
/*End of the display step*/
emptyArr = (char*)malloc(sizeof(char)*width*threadcount);
memset(emptyArr, '\0', sizeof(char)*width*threadcount);
for (int i=0;i<GPU_N;i++)
{
//initialize plan->device
plan[i].device=i;
// The program currently only handles the case quantities = 1
plan[i].quantities = 1;
}
//initialize zcx
h_zcx->mode = 1;
h_zcx->encr_pos = BLOCK_SIZE;
memset(h_zcx->nonce, 0, BLOCK_SIZE * sizeof(unsigned char));
}
void freeCUDA()
{
for (int i=0;i<GPU_N;i++)
{
hipFree(plan[i].devPass);
hipFree(plan[i].d_pre_terminal);
hipFree(plan[i].deviceArrPtr);
hipFree(plan[i].d_salt);
hipFree(plan[i].d_pvv);
hipFree(plan[i].d_in);
hipFree(plan[i].d_out);
}
}
static CUT_THREADPROC solverThread(TGPUplan *plan){
/******************************************************************
Variable declarations
******************************************************************/
//devPitch - passed to the pitched allocations, but not used afterwards.
size_t devPitch;
int pivot_base = 0;
int ret[threadcount];
//hostPass array used to display the passwords returned from the device.
char hostPass[threadcount][80];
memset(hostPass,'\0', sizeof(char)*threadcount*80);
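// hostPass/devPass hold one candidate password per GPU thread, width (80) chars each;
// a non-empty entry after a launch marks the recovered password.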
/*****************************************************************
End of variable declarations
******************************************************************/
memset(ret,-1,sizeof(int)*threadcount);
/*****************************************************************
Allocate memory on each GPU and copy the data needed for the computation from host to device
*****************************************************************/
//Set device
checkCudaErrors(hipSetDevice(plan->device));
hipMallocPitch((void**)&plan->devPass, &devPitch, width * sizeof(char), threadcount);
//Initialize plan->deviceArrPtr on each GPU
hipMallocPitch((void**)&plan->deviceArrPtr, &devPitch, width * sizeof(char), plan->wordCount);
hipMemcpy2D(plan->deviceArrPtr, width*sizeof(char), hostArray + plan->startIndex, width*sizeof(char), width, plan->wordCount, hipMemcpyHostToDevice);
//Copy the password-verification data to each GPU
hipMalloc((void**)&plan->d_salt, sizeof(int) * 16);
hipMemcpy(plan->d_salt, S, sizeof(int) * 16, hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_pvv, sizeof(int) * 2);
hipMemcpy(plan->d_pvv, stored_pvv, sizeof(int) * 2, hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_pre_terminal, sizeof(char) * strlen(temp2));
hipMemcpy(plan->d_pre_terminal, temp2, sizeof(char) * strlen(temp2), hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_out, threadcount*CHUNK*sizeof(unsigned char));
hipMalloc((void**)&plan->d_in,threadcount*CHUNK*sizeof(unsigned char));
hipMalloc((void**)&plan->d_Key,threadcount*16*sizeof(unsigned char));
hipMalloc((void**)&plan->d_ret,threadcount*sizeof(unsigned int));
plan->Key = (unsigned char *)malloc(sizeof(unsigned char)*16*threadcount);
//allocate memory for the decryption stage
hipMalloc((void**)&plan->d_zcx,threadcount*sizeof(fcrypt_ctx));
hipMalloc((void**)&plan->d_acx,threadcount*sizeof(aes_ctx));
//allocate memory for the decompression stage
hipMalloc((void**)&plan->d_strm, threadcount*sizeof(z_stream));
hipMalloc((void**)&plan->d_state,threadcount*sizeof(struct inflate_state FAR));
/****************************************************************
End of the host-to-device data transfer
*****************************************************************/
/****************************************************************
The kernel is launched repeatedly; launches are independent across devices
*****************************************************************/
pivot_base = plan->device*threadcount;
//checkCudaErrors(hipDeviceSynchronize());
while((pivot_base < plan->wordCount)&&(!found))
{
//Reset the buffers before each kernel launch
hipMemcpy2D(plan->devPass, width*sizeof(char), emptyArr,width*sizeof(char), width, threadcount, hipMemcpyHostToDevice);
hipMemset (plan->d_out, 0, threadcount*CHUNK);
for (int i=0;i<threadcount;i++) {
hipMemcpy(plan->d_in+i*CHUNK, in, CHUNK*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(plan->d_zcx+i, h_zcx, sizeof(fcrypt_ctx), hipMemcpyHostToDevice);}
hipMemset (plan->d_ret, -1, threadcount*sizeof(int));
//launch the kernel
hipLaunchKernelGGL(( RunKernel), dim3(blocks), dim3(threads), 0, 0,
pivot_base,
plan->devPass,
plan->deviceArrPtr, width, plan->quantities,
plan->wordCount, plan->d_pre_terminal,strlen(temp2), plan->d_salt, saltLen, plan->d_pvv, dkLen,plan->d_in,len,plan->d_out,extension,plan->d_Key,plan->d_ret,plan->d_zcx,plan->d_acx,plan->d_strm,plan->d_state);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "#DEVICE ERROR#: ", hipGetErrorString(error));
freeCUDA();
return ;
}
else
{
//Advance pivot_base
pivot_base += GPU_N*threadcount;
hipMemcpy2D(hostPass, width*sizeof(char), plan->devPass, width*sizeof(char),width,threadcount, hipMemcpyDeviceToHost);
hipMemcpy(ret,plan->d_ret,sizeof(int)*threadcount,hipMemcpyDeviceToHost);
hipMemcpy(plan->Key,plan->d_Key,sizeof(unsigned char)*16*threadcount,hipMemcpyDeviceToHost);
//cout << "\n----------------------------------------------------------------------\n";
//cout << "\tTong thoi gian: " << cutGetTimerValue(timer) << "ms";
//cout << "\t" << pivot_base << "/" << GPU_N << " ma da thu.\n";
for (int i1=0; i1 < threadcount; i1++)
if (strcmp(hostPass[i1], "") != 0) //Tim thay ma giai
{
cout << "\nThe correct password is: ";
cout << hostPass[i1] << "\n";
found=1;
}
}
checkCudaErrors(hipDeviceSynchronize());
}
/*****************************************************************
End of the repeated kernel launches, independent across devices.
*****************************************************************/
hipFree(plan->devPass);
hipFree(plan->d_pre_terminal);
hipFree(plan->deviceArrPtr);
hipFree(plan->d_salt);
hipFree(plan->d_pvv);
hipFree(plan->d_out);
hipFree(plan->d_in);
hipFree(plan->d_Key);
hipFree(plan->d_ret);
hipFree(plan->d_zcx);
hipFree(plan->d_acx);
hipFree(plan->d_strm);
hipFree(plan->d_state);
free(plan->Key);
/*****************************************************************
Timing bookkeeping and termination of all worker threads
******************************************************************/
hipDeviceReset();
CUT_THREADEND;
/*****************************************************************
End
******************************************************************/
}
void crack(){
unsigned int timer=0;
cutCreateTimer(&timer);
cutStartTimer(timer);
/*Each CPU thread manages one GPU; since there are GPU_N GPUs we need GPU_N parallel host threads */
for(int GPUIndex = 0; GPUIndex < GPU_N; GPUIndex++)
threadID[GPUIndex] = cutStartThread((CUT_THREADROUTINE)solverThread, &plan[GPUIndex]);
//printf("main(): waiting...\n");
cutWaitForThreads(threadID, GPU_N);
cout <<cutGetTimerValue(timer) << "ms\n";
cout << "\n---------------------------------------------------------------------------------------------------------------\n";
time_parallel += cutGetTimerValue(timer);
cutStopTimer(timer);
cutDeleteTimer(timer);
}
void readGrammar(char *filename1, char *filename2, int *count)
{
memset(Sk, 0, 33*sizeof(int));
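// Assuming the dictionary file is sorted by word length, Sk[k] records the index in hostArray of the
// first word of length k, so Sk[k+1] - Sk[k] is the number of words of length k (used later to size
// the per-launch word range for each pre-terminal).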
printf("\nA grammar:");
*count = ReadRules(filename1); //argv[2]
FILE *fp;
char buffer[80] = "";
fp =fopen(filename2, "r"); //argv[3]
//Initialize hostArray.
if (fp != NULL)
{
int h = 0;
while(fgets(buffer, sizeof(buffer), fp) != NULL)
{
if(h==0)
{
v_prev= v_curr = strlen(buffer)-1;
Sk[v_curr] = h;
}
v_curr = strlen(buffer)-1;
if(v_curr != v_prev)
{
Sk[v_curr] = h;
v_prev = v_curr;
}
strcpy(hostArray[h], buffer);
h++;
strcpy(buffer, "");
}
fclose(fp);
}
}
int checkInfo(char *filename)
{
ZIP_LOCAL_FILE_HEADER* lfh;
FILE* pt;
pt = fopen(filename, "rb");
lfh = (ZIP_LOCAL_FILE_HEADER*) malloc(sizeof(ZIP_LOCAL_FILE_HEADER));
if(!pt) return -1;
read_lfh(lfh, pt);
if(get_bit_encrypted_ornot(lfh, pt) != 1)
{
cout<< "File is not encrypted";
return -1;
}
else
{
char *cp;
cp = strrchr(get_fname(lfh, pt), '.');
if (strcmp(cp, ".pdf")==0) extension = dotpdf;
if (strcmp(cp, ".doc")==0) extension = dotdoc;
if (strcmp(cp, ".txt")==0) extension = dottxt;
*cp=0;
printf("File is encrypted , parameters:");
/*---------------------------------------------------------------------------------------------
Read the salt, authentication code and password verification value, which are only present when the file is encrypted
----------------------------------------------------------------------------------------------*/
display_salt_pvv_ac(lfh,pt,S,&saltLen,stored_pvv,&dkLen);
fseek(pt, 30 + lfh->fn_length + lfh->ex_length + SALT_LENGTH(1) + PWD_VER_LENGTH, SEEK_SET);
len = (int)fread(in, sizeof(unsigned char),CHUNK, pt);
fclose(pt);
}
return 1;
}
void multiFunction(int light, int count)
{
struct entry *working_value = NULL;
struct entry *head = NULL;
struct entry *tail = NULL;
int status = 0;
if(light == 6)
{
//Initialize the host once
initHost();
}
char temp_preterminal[20] = "";
char search_characters[4]="";
char temp_base[20]="";
//Build the tree and traverse the pre-terminal structures
//1. Part 1: all base structs
for(int i = 1; i< count; i++)
{
if(strcmp(Column1[i],"S") == 0)
{
//Clear search_characters and temp_preterminal
strcpy(search_characters,"");
strcpy(temp_preterminal,"");
working_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(working_value->base,Column2[i]);
working_value->pivot = 0;
working_value->num_strings = 0;
for(int j = 0; j< strlen(Column2[i]); j++)
{
if(Column2[i][j] == 'L' || Column2[i][j] == 'S' || Column2[i][j]=='D')
working_value->num_strings++;
}
//Compute the probability and the pre_terminal string
working_value->probability = Column3[i];
//Walk the structure in Column2 to compute the probability.
int k;
char temp[2];
for(int j = 0; j< strlen(Column2[i]);)
{
k = 0;
search_characters[k] = Column2[i][j++];
while((Column2[i][j] != 'D') && (Column2[i][j] != 'L') && (Column2[i][j] != 'S'))
{
search_characters[++k] = Column2[i][j++];
if(Column2[i][j] == '\0') break;
}
//Exiting the loop means position j starts a new character class that is not yet consumed; k moves on by one
search_characters[++k] = '\0';
//Check whether the leading character is 'L'. If so, only update the pre_terminal string to mark it;
//do not update the probability.
if (search_characters[0] == 'L')
{
temp[0] = 'H';
temp[1] = '\0';
strcat(temp_preterminal, temp);
strcat(temp_preterminal,search_characters);
strcat(temp_preterminal, temp);
}
else
{
//Otherwise, look up the production and update the probability
for(int t = 1; t < count; t ++)
{
if(strcmp(Column1[t],search_characters) == 0)
{
strcat(temp_preterminal,Column2[t]);
working_value->probability = working_value->probability * Column3[t];
break;
}
}
} //End of the 'D' or 'S' character case
//Update the highest probability, then exit
}// End of the for loop: the probability and the pre_terminal are now determined
strcpy(working_value->pre_terminal,temp_preterminal);
//Final step of phase 1: add it to the priority queue
if(status ==0)
{
working_value->next = NULL;
working_value->prev = NULL;
head = tail = working_value;
status = 1;
}
else
{
//Append to the end of the queue
working_value->next = NULL;
working_value->prev = tail;
tail->next = working_value;
tail = working_value;
}
working_value = NULL;
}
else
{
break;
} //end of if-else
} //End of the for loop.
/*Step 2. Keep building the tree while emitting the candidate passwords that feed the PBKDF2 algorithm.
This could be factored into its own function for a cleaner hierarchy.
The search could be optimized so that Pop is faster.
The algorithm itself can be parallelized, like a parallel search over a list.
*/
int order=0;
working_value = Pop(head);
if(light == 6)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order", "Keys","Time");
cout << "\n----------------------------------------**-----------------------------------**----------------------------------\n";
}
else if(light == 3)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order");
cout << "\n-------------------------------**----------------------------**-----------------------------\n";
}
while((working_value != NULL)&&(!found))
{
order++;
int qualities = 0;
int sk;
for(int h = 0; h< strlen(working_value->pre_terminal); h++)
if(working_value->pre_terminal[h] == 'L')
{
qualities++;
sk = (int)working_value->pre_terminal[h + 1] - 48;
}
strcpy(temp2, working_value->pre_terminal);
if(light == 6)
{
/* send the pre_terminal parameters obtained from Pop to the GPU_N devices */
for(int deviceIndex = 0; deviceIndex < GPU_N; deviceIndex++)
{
plan[deviceIndex].wordCount = Sk[sk+1] - Sk[sk];
plan[deviceIndex].startIndex = Sk[sk];
}
/*Launch GPU_N host threads in parallel, one per GPU*/
//Generate passwords by combining the pre_terminal structure with the dictionary of meaningful words.
printf("\n%-12s %-15s %-10d %-15d %-15f %-15d %-15ld",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order,Sk[sk+1] - Sk[sk]);
crack();
}
else if(light == 3)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,3);
}
else if(light == 4)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,4);
}
//Keep building the tree, inserting and popping entries
for(int i= working_value->pivot; i< working_value->num_strings; i++)
{
strcpy(temp_base, working_value->base); // temp_base = "D1L3S2D1"
/*Local variables, scoped to this for loop only */
int k; // intermediate index
char temp[2]; // temp[2] = 'L' || 'S' || 'D'
char temp1[2]; // temp1[2] = 'H' -> used as a delimiter around L segments.
int index = -1; // index of the variable; tells whether it can be replaced.
strcpy(temp_preterminal,""); // clear temp_preterminal so it can be reused (declared near main)
// child_value->pre_terminal = temp_preterminal.
int segment = 0; // index into the base; says where to start and stop cutting the base string.
// e.g. for 4L3$$4, where to cut the S2 segment
char temp_copy[10]; // substring from segment up to (segment + (int)atoi(search_characters))
/*Parse temp_base to get the digit and the class indicator (D, L or S). This tells how many characters
to read from working_value's pre_terminal into the child value.*/
//Flag indicating whether a new entry can be inserted
bool agreement = false;
float reprobability = working_value->probability;
for(int j = 0; j < strlen(temp_base);)
{
strcpy(search_characters,"");// xoa search_characters, dung lai bien o phan tren.
// chang han search_characters = 1 hoac 2 hoac 1, nho loc bo ki tu
// D truoc D1, ki tu S truoc S2, ki tu D truoc D1 cua temp_base.
/* Lay ki tu dau tien cua temp_base*/
k=0;
temp[0] = temp_base[j];
temp[1] = '\0';
/*end */
j = j +1;
while((temp_base[j] != 'D') && (temp_base[j] != 'L') && (temp_base[j] != 'S'))
{
search_characters[k++] = temp_base[j++];
if(temp_base[j] == '\0') break;
}
//Terminate the string
search_characters[k] = '\0';
index++;
//temp_preterminal
if(temp[0] == 'L')
{
if(index == i)
{
agreement = false;
break; //Break out of the loop over j.
}
temp1[0] = 'H';
temp1[1] = '\0';
strcat(temp_preterminal, temp1);
strcat(temp_preterminal, temp);
strcat(temp_preterminal, search_characters);
strcat(temp_preterminal, temp1);
//segment must be updated
segment = segment + 3 + strlen(search_characters);
}
else
{
//Must also compare index against i.
if(index != i)
{
//Simply copy the structure from position segment up to (segment + (int)atoi(search_characters))
strcpy(temp_copy,""); // temporary storage only
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
//Update segment for the next copy.
segment = segment + (int)atoi(search_characters);
strcat(temp_preterminal, temp_copy);
}
else if(temp[0] == 'L')
{
agreement = false;
break; //Break out of the loop over j.
}
else //If we get here and no new string can be substituted, discard this branch.
{
//Join temp with search_characters to form e.g. "S2" => call it search_str.
//Extract the characters of working_value->pre_terminal from position segment to segment + (int)atoi(search_characters):
//call this pointed_str. If a replacement is possible, update its probability and create a new
//node
char search_str[4];
char pointed_str[4];
strcpy(search_str,temp);
strcat(search_str,search_characters);
strcpy(temp_copy,""); //ok da xoa temp_copy
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
strcpy(pointed_str, temp_copy);
//Search for the replacement; mainly about finding the position d.
for(int d = 1; d < count; d++)
{
if(strcmp(Column1[d],search_str)==0)
{
if(strcmp(Column2[d], pointed_str)==0)
{
segment += strlen(pointed_str);
if( (d+1 < count) && (strcmp(Column1[d+1],search_str)==0))
{
//A replacement is still available, handle it here
//If replaceable, copy through the end of j
strcat(temp_preterminal,Column2[d+1]);
// Recompute the probability
reprobability = (reprobability*Column3[d+1])/Column3[d];
agreement = true;
break;
}
else
{
//No replacements left at this position. Go back and increase i until done.
agreement = false;
break;
}
}
}
} //End of the search for a replacement string
} //End of else - index
} //End of else - L
} //End of the loop over temp_base.
if(agreement == true)
{
//Append the new entry to the end of the list.
struct entry *child_value;
child_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(child_value->base,working_value->base);
strcpy(child_value->pre_terminal,temp_preterminal);
child_value->pivot = i;
child_value->num_strings = working_value->num_strings;
child_value->probability = reprobability;
child_value->next = NULL;
child_value->prev = tail;
tail->next = child_value;
tail = child_value;
}
} //End of the for loop over i
//Then free the working_value entry.
if(working_value->prev == NULL)
{
if(working_value->next == NULL)
{
free(working_value);
head = tail = NULL;
}
else
{
(working_value->next)->prev = NULL;
head = (working_value->next);
free(working_value);
}
}
else
{
if(working_value->next == NULL)
{
(working_value->prev)->next = NULL;
tail = working_value->prev;
free(working_value);
}
else
{
(working_value->next)->prev = working_value->prev;
(working_value->prev)->next = working_value->next;
free(working_value);
}
}
working_value = Pop(head);
} // End of the while loop
if(light == 6)
{
cout << "\nThe end ...\n";
}
}
void checkCandidatePasswords()
{
int P[60]={0};
string password = "";
int passLen;
cin.get();
printf("\nNhap mat khau kiem tra:\n");
getline(cin, password);
passLen = password.length();
for(int i = 0; i < passLen; i++)
P[i] = password[i];
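// PBKDF2_1 presumably derives a key from the candidate password using the stored salt and compares the
// resulting password-verification value against stored_pvv; a non-zero return marks a viable candidate.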
if(PBKDF2_1(S,saltLen,stored_pvv,dkLen,P, passLen) != 0)
printf("\nThis is a candidate password");
else
printf("\nThis is not a candidate password");
}
int main(int argc, char *argv[]){
int isEncrypted = 0;
char ch;
int count;
while(1)
{
printf("\n1.Thong tin co ban cua tep nen Zip va van pham");
printf("\n2.Kiem tra mot mat khau la ung cu");
printf("\n3.Sinh mat khau tuan tu");
printf("\n4.Tap mat khau ung cu - tt tuan tu");
printf("\n5.Sinh mat khau song song");
printf("\n6.Pha mat khau song song");
printf("\n7.Thoat chuong trinh");
printf("\nLua chon chuc nang(1->7):");
fflush(stdin);
fflush(stdin);
ch = getchar();
switch(ch)
{
case '1':
isEncrypted = checkInfo(argv[1]);
printf("\nisEncrypted = %d", isEncrypted);
if (isEncrypted == 1) readGrammar(argv[2], argv[3], &count);
cin.get();
break;
case '2':
if(isEncrypted == 1)
{
checkCandidatePasswords();
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '3':
multiFunction(3,count);
cin.get();
break;
case '4':
multiFunction(4,count);
cin.get();
break;
case '5':
multiFunction(5,count);
cin.get();
break;
case '6':
if (isEncrypted == 1)
{
multiFunction(6,count);
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '7':exit(1);
}
}
}
|
9272b66c6fc0ccc4ed3db69ae562fa70a89f7c29.cu
|
#include <iostream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <sstream>
#include <multithreading.h>
// DEPRECATED since CUDA 5.0
//#include <cutil.h>
//#include <cutil_inline.h>
//#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_timer.h>
#include "crackkernel.cu"
#include "entry.cpp"
#include "zip.c"
#include "pbkdf2.cpp"
using namespace std;
dim3 threads(128);
dim3 blocks(12,10);
int threadcount = threads.x*blocks.x*blocks.y;
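// With the launch configuration above this is 128 * 12 * 10 = 15360 threads (one candidate password per thread per launch).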
//Fixed width used to split each row into columns.
int width = 80;
unsigned char extension,in[CHUNK],found=0;
/*Variables describing the encrypted file:
- array holding the PVV (password verification value)
- array holding the salt
*/
int dkLen,saltLen,len;
int stored_pvv[2];
int S[16];
/*For multi-GPU use: query how many CUDA-capable GPUs are available for computation*/
const int MAX_GPU_COUNT = 8;
int GPU_N;
CUTThread threadID[MAX_GPU_COUNT];
TGPUplan plan[MAX_GPU_COUNT];
char *emptyArr;
char temp2[20];
float time_parallel = 0;
//Holds the words of the dictionary
char hostArray[869229][80];
/*End*/
fcrypt_ctx h_zcx[1];
/*Variables holding the grammar information*/
int v_prev, v_curr;
int Sk[33];
/*End of grammar variables*/
void initHost(){
/*Display the number of CUDA-capable GPUs*/
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
if(GPU_N > MAX_GPU_COUNT) GPU_N = MAX_GPU_COUNT;
printf("\nCUDA-capable device count: %i\n", GPU_N);
/*End of the display step*/
emptyArr = (char*)malloc(sizeof(char)*width*threadcount);
memset(emptyArr, '\0', sizeof(char)*width*threadcount);
for (int i=0;i<GPU_N;i++)
{
//initialize plan->device
plan[i].device=i;
// The program currently only handles the case quantities = 1
plan[i].quantities = 1;
}
//initialize zcx
h_zcx->mode = 1;
h_zcx->encr_pos = BLOCK_SIZE;
memset(h_zcx->nonce, 0, BLOCK_SIZE * sizeof(unsigned char));
}
void freeCUDA()
{
for (int i=0;i<GPU_N;i++)
{
cudaFree(plan[i].devPass);
cudaFree(plan[i].d_pre_terminal);
cudaFree(plan[i].deviceArrPtr);
cudaFree(plan[i].d_salt);
cudaFree(plan[i].d_pvv);
cudaFree(plan[i].d_in);
cudaFree(plan[i].d_out);
}
}
static CUT_THREADPROC solverThread(TGPUplan *plan){
/******************************************************************
Variable declarations
******************************************************************/
//devPitch - passed to the pitched allocations, but not used afterwards.
size_t devPitch;
int pivot_base = 0;
int ret[threadcount];
//hostPass array used to display the passwords returned from the device.
char hostPass[threadcount][80];
memset(hostPass,'\0', sizeof(char)*threadcount*80);
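// hostPass/devPass hold one candidate password per GPU thread, width (80) chars each;
// a non-empty entry after a launch marks the recovered password.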
/*****************************************************************
End of variable declarations
******************************************************************/
memset(ret,-1,sizeof(int)*threadcount);
/*****************************************************************
Allocate memory on each GPU and copy the data needed for the computation from host to device
*****************************************************************/
//Set device
checkCudaErrors(cudaSetDevice(plan->device));
cudaMallocPitch((void**)&plan->devPass, &devPitch, width * sizeof(char), threadcount);
//Initialize plan->deviceArrPtr on each GPU
cudaMallocPitch((void**)&plan->deviceArrPtr, &devPitch, width * sizeof(char), plan->wordCount);
cudaMemcpy2D(plan->deviceArrPtr, width*sizeof(char), hostArray + plan->startIndex, width*sizeof(char), width, plan->wordCount, cudaMemcpyHostToDevice);
//Copy the password-verification data to each GPU
cudaMalloc((void**)&plan->d_salt, sizeof(int) * 16);
cudaMemcpy(plan->d_salt, S, sizeof(int) * 16, cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_pvv, sizeof(int) * 2);
cudaMemcpy(plan->d_pvv, stored_pvv, sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_pre_terminal, sizeof(char) * strlen(temp2));
cudaMemcpy(plan->d_pre_terminal, temp2, sizeof(char) * strlen(temp2), cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_out, threadcount*CHUNK*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_in,threadcount*CHUNK*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_Key,threadcount*16*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_ret,threadcount*sizeof(unsigned int));
plan->Key = (unsigned char *)malloc(sizeof(unsigned char)*16*threadcount);
//allocate memory for the decryption stage
cudaMalloc((void**)&plan->d_zcx,threadcount*sizeof(fcrypt_ctx));
cudaMalloc((void**)&plan->d_acx,threadcount*sizeof(aes_ctx));
//allocate memory for the decompression stage
cudaMalloc((void**)&plan->d_strm, threadcount*sizeof(z_stream));
cudaMalloc((void**)&plan->d_state,threadcount*sizeof(struct inflate_state FAR));
/****************************************************************
End of the host-to-device data transfer
*****************************************************************/
/****************************************************************
The kernel is launched repeatedly; launches are independent across devices
*****************************************************************/
pivot_base = plan->device*threadcount;
//checkCudaErrors(cudaThreadSynchronize());
while((pivot_base < plan->wordCount)&&(!found))
{
//Reset the buffers before each kernel launch
cudaMemcpy2D(plan->devPass, width*sizeof(char), emptyArr,width*sizeof(char), width, threadcount, cudaMemcpyHostToDevice);
cudaMemset (plan->d_out, 0, threadcount*CHUNK);
for (int i=0;i<threadcount;i++) {
cudaMemcpy(plan->d_in+i*CHUNK, in, CHUNK*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(plan->d_zcx+i, h_zcx, sizeof(fcrypt_ctx), cudaMemcpyHostToDevice);}
cudaMemset (plan->d_ret, -1, threadcount*sizeof(int));
//launch the kernel
RunKernel<<<blocks, threads>>>(
pivot_base,
plan->devPass,
plan->deviceArrPtr, width, plan->quantities,
plan->wordCount, plan->d_pre_terminal,strlen(temp2), plan->d_salt, saltLen, plan->d_pvv, dkLen,plan->d_in,len,plan->d_out,extension,plan->d_Key,plan->d_ret,plan->d_zcx,plan->d_acx,plan->d_strm,plan->d_state);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "#DEVICE ERROR#: ", cudaGetErrorString(error));
freeCUDA();
return ;
}
else
{
//Advance pivot_base
pivot_base += GPU_N*threadcount;
cudaMemcpy2D(hostPass, width*sizeof(char), plan->devPass, width*sizeof(char),width,threadcount, cudaMemcpyDeviceToHost);
cudaMemcpy(ret,plan->d_ret,sizeof(int)*threadcount,cudaMemcpyDeviceToHost);
cudaMemcpy(plan->Key,plan->d_Key,sizeof(unsigned char)*16*threadcount,cudaMemcpyDeviceToHost);
//cout << "\n----------------------------------------------------------------------\n";
//cout << "\tTong thoi gian: " << cutGetTimerValue(timer) << "ms";
//cout << "\t" << pivot_base << "/" << GPU_N << " ma da thu.\n";
for (int i1=0; i1 < threadcount; i1++)
if (strcmp(hostPass[i1], "") != 0) //Tim thay ma giai
{
cout << "\nThe correct password is: ";
cout << hostPass[i1] << "\n";
found=1;
}
}
checkCudaErrors(cudaThreadSynchronize());
}
/*****************************************************************
End of the repeated kernel launches, independent across devices.
*****************************************************************/
cudaFree(plan->devPass);
cudaFree(plan->d_pre_terminal);
cudaFree(plan->deviceArrPtr);
cudaFree(plan->d_salt);
cudaFree(plan->d_pvv);
cudaFree(plan->d_out);
cudaFree(plan->d_in);
cudaFree(plan->d_Key);
cudaFree(plan->d_ret);
cudaFree(plan->d_zcx);
cudaFree(plan->d_acx);
cudaFree(plan->d_strm);
cudaFree(plan->d_state);
free(plan->Key);
/*****************************************************************
Timing bookkeeping and termination of all worker threads
******************************************************************/
cudaThreadExit();
CUT_THREADEND;
/*****************************************************************
End
******************************************************************/
}
void crack(){
unsigned int timer=0;
cutCreateTimer(&timer);
cutStartTimer(timer);
/*Each CPU thread manages one GPU; since there are GPU_N GPUs we need GPU_N parallel host threads */
for(int GPUIndex = 0; GPUIndex < GPU_N; GPUIndex++)
threadID[GPUIndex] = cutStartThread((CUT_THREADROUTINE)solverThread, &plan[GPUIndex]);
//printf("main(): waiting...\n");
cutWaitForThreads(threadID, GPU_N);
cout <<cutGetTimerValue(timer) << "ms\n";
cout << "\n---------------------------------------------------------------------------------------------------------------\n";
time_parallel += cutGetTimerValue(timer);
cutStopTimer(timer);
cutDeleteTimer(timer);
}
void readGrammar(char *filename1, char *filename2, int *count)
{
memset(Sk, 0, 33*sizeof(int));
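// Assuming the dictionary file is sorted by word length, Sk[k] records the index in hostArray of the
// first word of length k, so Sk[k+1] - Sk[k] is the number of words of length k (used later to size
// the per-launch word range for each pre-terminal).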
printf("\nA grammar:");
*count = ReadRules(filename1); //argv[2]
FILE *fp;
char buffer[80] = "";
fp =fopen(filename2, "r"); //argv[3]
//Initialize hostArray.
if (fp != NULL)
{
int h = 0;
while(fgets(buffer, sizeof(buffer), fp) != NULL)
{
if(h==0)
{
v_prev= v_curr = strlen(buffer)-1;
Sk[v_curr] = h;
}
v_curr = strlen(buffer)-1;
if(v_curr != v_prev)
{
Sk[v_curr] = h;
v_prev = v_curr;
}
strcpy(hostArray[h], buffer);
h++;
strcpy(buffer, "");
}
fclose(fp);
}
}
int checkInfo(char *filename)
{
ZIP_LOCAL_FILE_HEADER* lfh;
FILE* pt;
pt = fopen(filename, "rb");
lfh = (ZIP_LOCAL_FILE_HEADER*) malloc(sizeof(ZIP_LOCAL_FILE_HEADER));
if(!pt) return -1;
read_lfh(lfh, pt);
if(get_bit_encrypted_ornot(lfh, pt) != 1)
{
cout<< "File is not encrypted";
return -1;
}
else
{
char *cp;
cp = strrchr(get_fname(lfh, pt), '.');
if (strcmp(cp, ".pdf")==0) extension = dotpdf;
if (strcmp(cp, ".doc")==0) extension = dotdoc;
if (strcmp(cp, ".txt")==0) extension = dottxt;
*cp=0;
printf("File is encrypted , parameters:");
/*---------------------------------------------------------------------------------------------
Read the salt, authentication code and password verification value, which are only present when the file is encrypted
----------------------------------------------------------------------------------------------*/
display_salt_pvv_ac(lfh,pt,S,&saltLen,stored_pvv,&dkLen);
fseek(pt, 30 + lfh->fn_length + lfh->ex_length + SALT_LENGTH(1) + PWD_VER_LENGTH, SEEK_SET);
len = (int)fread(in, sizeof(unsigned char),CHUNK, pt);
fclose(pt);
}
return 1;
}
void multiFunction(int light, int count)
{
struct entry *working_value = NULL;
struct entry *head = NULL;
struct entry *tail = NULL;
int status = 0;
if(light == 6)
{
//Initialize the host once
initHost();
}
char temp_preterminal[20] = "";
char search_characters[4]="";
char temp_base[20]="";
//Build the tree and traverse the pre-terminal structures
//1. Part 1: all base structs
for(int i = 1; i< count; i++)
{
if(strcmp(Column1[i],"S") == 0)
{
//Clear search_characters and temp_preterminal
strcpy(search_characters,"");
strcpy(temp_preterminal,"");
working_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(working_value->base,Column2[i]);
working_value->pivot = 0;
working_value->num_strings = 0;
for(int j = 0; j< strlen(Column2[i]); j++)
{
if(Column2[i][j] == 'L' || Column2[i][j] == 'S' || Column2[i][j]=='D')
working_value->num_strings++;
}
//Compute the probability and the pre_terminal string
working_value->probability = Column3[i];
//Walk the structure in Column2 to compute the probability.
int k;
char temp[2];
for(int j = 0; j< strlen(Column2[i]);)
{
k = 0;
search_characters[k] = Column2[i][j++];
while((Column2[i][j] != 'D') && (Column2[i][j] != 'L') && (Column2[i][j] != 'S'))
{
search_characters[++k] = Column2[i][j++];
if(Column2[i][j] == '\0') break;
}
//Exiting here means position j is already at the start of a new symbol that has not been consumed yet; k is incremented by one
search_characters[++k] = '\0';
//Check whether the leading character is 'L'. If it is, only update the pre_terminal string to mark it;
//do not update the probability.
if (search_characters[0] == 'L')
{
temp[0] = 'H';
temp[1] = '\0';
strcat(temp_preterminal, temp);
strcat(temp_preterminal,search_characters);
strcat(temp_preterminal, temp);
}
else
{
//Otherwise, search for it and update the probability
for(int t = 1; t < count; t ++)
{
if(strcmp(Column1[t],search_characters) == 0)
{
strcat(temp_preterminal,Column2[t]);
working_value->probability = working_value->probability * Column3[t];
break;
}
}
} //End of the 'D' or 'S' character case
//Update the largest probability, then exit
}// End of the for loop: the probability and the pre_terminal are now determined
strcpy(working_value->pre_terminal,temp_preterminal);
//Last step of phase 1: add it to the priority queue
if(status ==0)
{
working_value->next = NULL;
working_value->prev = NULL;
head = tail = working_value;
status = 1;
}
else
{
//Append to the end of the queue
working_value->next = NULL;
working_value->prev = tail;
tail->next = working_value;
tail = working_value;
}
working_value = NULL;
}
else
{
break;
} //end of the if-else
} //End of the for loop.
/*Step 2. Build the tree while emitting the list of candidate passwords that serves as input to the PBKDF2 algorithm.
This could be moved into its own function for a cleaner functional decomposition.
The search could be optimized so that Pop runs faster.
This algorithm can be parallelized, much like a parallel search over a list.
*/
int order=0;
working_value = Pop(head);
if(light == 6)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order", "Keys","Time");
cout << "\n----------------------------------------**-----------------------------------**----------------------------------\n";
}
else if(light == 3)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order");
cout << "\n-------------------------------**----------------------------**-----------------------------\n";
}
while((working_value != NULL)&&(!found))
{
order++;
int qualities = 0;
int sk;
for(int h = 0; h< strlen(working_value->pre_terminal); h++)
if(working_value->pre_terminal[h] == 'L')
{
qualities++;
sk = (int)working_value->pre_terminal[h + 1] - 48;
}
strcpy(temp2, working_value->pre_terminal);
if(light == 6)
{
/* pass the pre_terminal parameters obtained from the Pop operation to the devices - GPU_N devices */
for(int deviceIndex = 0; deviceIndex < GPU_N; deviceIndex++)
{
plan[deviceIndex].wordCount = Sk[sk+1] - Sk[sk];
plan[deviceIndex].startIndex = Sk[sk];
}
/*Launch GPU_N CPU threads in parallel, each managing one of the GPU_N GPUs*/
//Generate passwords by combining the pre_terminal structure with the dictionary of meaningful words.
printf("\n%-12s %-15s %-10d %-15d %-15f %-15d %-15ld",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order,Sk[sk+1] - Sk[sk]);
crack();
}
else if(light == 3)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,3);
}
else if(light == 4)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,4);
}
//Continue building the tree, inserting and popping entries
for(int i= working_value->pivot; i< working_value->num_strings; i++)
{
strcpy(temp_base, working_value->base); // temp_base = "D1L3S2D1"
/*Data declarations, scoped to this for loop only */
int k; // intermediate running index
char temp[2]; // temp[2] = 'L' || 'S' || 'D'
char temp1[2]; // temp1[2] = 'H' -> used as a separator around L.
int index = -1; // index of the variable; tells us whether a replacement is possible.
strcpy(temp_preterminal,""); // clear temp_preterminal so it can be reused (declared near main)
// child_value->pre_terminal = temp_preterminal.
int segment = 0; // base index: tells us from where to where the base string is cut.
// e.g. 4L3$$4: from where to where S2 is cut
char temp_copy[10]; // the substring from segment up to (segment + (int)atoi(search_characters))
/*Parse temp_base to extract the digit and the indicator D, L or S. This tells us how many characters
to copy from the pre_terminal structure of working_value into child_working_value*/
//Flag that indicates whether a new entry can be inserted
bool agreement = false;
float reprobability = working_value->probability;
for(int j = 0; j < strlen(temp_base);)
{
strcpy(search_characters,"");// xoa search_characters, dung lai bien o phan tren.
// chang han search_characters = 1 hoac 2 hoac 1, nho loc bo ki tu
// D truoc D1, ki tu S truoc S2, ki tu D truoc D1 cua temp_base.
/* Lay ki tu dau tien cua temp_base*/
k=0;
temp[0] = temp_base[j];
temp[1] = '\0';
/*end */
j = j +1;
while((temp_base[j] != 'D') && (temp_base[j] != 'L') && (temp_base[j] != 'S'))
{
search_characters[k++] = temp_base[j++];
if(temp_base[j] == '\0') break;
}
//Terminate the string
search_characters[k] = '\0';
index++;
//temp_preterminal
if(temp[0] == 'L')
{
if(index == i)
{
agreement = false;
break; //Exit the for loop over j.
}
temp1[0] = 'H';
temp1[1] = '\0';
strcat(temp_preterminal, temp1);
strcat(temp_preterminal, temp);
strcat(temp_preterminal, search_characters);
strcat(temp_preterminal, temp1);
//The segment index must be updated as well
segment = segment + 3 + strlen(search_characters);
}
else
{
//We must also compare index against the loop index i.
if(index != i)
{
//Simply copy the structure from position segment up to (segment + (int)atoi(search_characters))
strcpy(temp_copy,""); // Temporary storage only
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
//Update segment for the next copy.
segment = segment + (int)atoi(search_characters);
strcat(temp_preterminal, temp_copy);
}
else if(temp[0] == 'L')
{
agreement = false;
break; //Exit the for loop over j.
}
else //If we get here and no new string is substituted, the candidate is discarded.
{
//Join temp and search_characters to form a pattern such as S2 => call it search_str.
//Extract the characters of working_value->pre_terminal from position segment to segment + (int)atoi(search_characters),
//called pointed_str. If the substitution is possible, update its probability as well and at the same time create a new
//node
char search_str[4];
char pointed_str[4];
strcpy(search_str,temp);
strcat(search_str,search_characters);
strcpy(temp_copy,""); //ok da xoa temp_copy
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
strcpy(pointed_str, temp_copy);
//Search for the substitution. Essentially this finds the position d.
for(int d = 1; d < count; d++)
{
if(strcmp(Column1[d],search_str)==0)
{
if(strcmp(Column2[d], pointed_str)==0)
{
segment += strlen(pointed_str);
if( (d+1 < count) && (strcmp(Column1[d+1],search_str)==0))
{
//A new entry can be added, i.e. a substitute remains; handle it here
//If the substitution is possible, copy up to the end of j
strcat(temp_preterminal,Column2[d+1]);
// Recompute the probability
reprobability = (reprobability*Column3[d+1])/Column3[d];
agreement = true;
break;
}
else
{
//No substitutes are left at this position. Go back, increase i, and continue to the end.
agreement = false;
break;
}
}
}
} //End of the for loop searching for a substitute string
} //End of else - index
} //End of else - L
} //End of the loop over temp_base.
if(agreement == true)
{
//Append the new entry to the end of the list under consideration.
struct entry *child_value;
child_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(child_value->base,working_value->base);
strcpy(child_value->pre_terminal,temp_preterminal);
child_value->pivot = i;
child_value->num_strings = working_value->num_strings;
child_value->probability = reprobability;
child_value->next = NULL;
child_value->prev = tail;
tail->next = child_value;
tail = child_value;
}
} //End of the for loop over i
//Then free the working_value entry.
if(working_value->prev == NULL)
{
if(working_value->next == NULL)
{
free(working_value);
head = tail = NULL;
}
else
{
(working_value->next)->prev = NULL;
head = (working_value->next);
free(working_value);
}
}
else
{
if(working_value->next == NULL)
{
(working_value->prev)->next = NULL;
tail = working_value->prev;
free(working_value);
}
else
{
(working_value->next)->prev = working_value->prev;
(working_value->prev)->next = working_value->next;
free(working_value);
}
}
working_value = Pop(head);
} // End of the while loop
if(light == 6)
{
cout << "\nThe end ...\n";
}
}
void checkCandidatePasswords()
{
int P[60]={0};
string password = "";
int passLen;
cin.get();
printf("\nNhap mat khau kiem tra:\n");
getline(cin, password);
passLen = password.length();
for(int i = 0; i < passLen; i++)
P[i] = password[i];
if(PBKDF2_1(S,saltLen,stored_pvv,dkLen,P, passLen) != 0)
printf("\nLa mat khau ung cu");
else
printf("\nKhong phai la mat khau ung cu");
}
int main(int argc, char *argv[]){
int isEncrypted = 0;
char ch;
int count;
while(1)
{
printf("\n1.Thong tin co ban cua tep nen Zip va van pham");
printf("\n2.Kiem tra mot mat khau la ung cu");
printf("\n3.Sinh mat khau tuan tu");
printf("\n4.Tap mat khau ung cu - tt tuan tu");
printf("\n5.Sinh mat khau song song");
printf("\n6.Pha mat khau song song");
printf("\n7.Thoat chuong trinh");
printf("\nLua chon chuc nang(1->7):");
fflush(stdin);
fflush(stdin);
ch = getchar();
switch(ch)
{
case '1':
isEncrypted = checkInfo(argv[1]);
printf("\nisEncrypted = %d", isEncrypted);
if (isEncrypted == 1) readGrammar(argv[2], argv[3], &count);
cin.get();
break;
case '2':
if(isEncrypted == 1)
{
checkCandidatePasswords();
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '3':
multiFunction(3,count);
cin.get();
break;
case '4':
multiFunction(4,count);
cin.get();
break;
case '5':
multiFunction(5,count);
cin.get();
break;
case '6':
if (isEncrypted == 1)
{
multiFunction(6,count);
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '7':exit(1);
}
}
}
|
015b731b3b73aac1f2745954601073e04e476af7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*BSD License
Copyright © belongs to the uploader, all rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, with the name of the uploader, and this list of conditions;
Redistributions in binary form must reproduce the above copyright notice, with the name of the uploader, and this list of conditions in the documentation and/or other materials provided with the distribution;
Neither the name of the uploader nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
*/
#include <stdio.h>
#include <math.h>
#include "string_funcs.cu"
//#include "hash_funcs.cpp"
#include "defs.h"
//#include "hash_funcs.h"
//#include "cuda_k.h"
#include "hash_funcs_cuda.h"
char *device_local;
unsigned int *token_length_device;
CalcFreqController *token_division_controller_device;
MyHashMapElement *hash_doc_token_sub_tables_device;
MyHashMapElement *hash_doc_token_tables_device;
MyHashMapElement *occ_hash_table_device;
float *bucket_sqrt_sum_device;
float *doc_similarity_matrix_device;
int *doc_rank_matrix_device;
__device__ bool stripPrefixes ( char *str);
__global__ void StripAffixes(char *dev_res, unsigned int *token_length, CalcFreqController *controller, int docs_count);
__global__ void MakeDocHash2(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2);
__global__ void MakeDocHash(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2);
size_t pitch1;
size_t pitch2;
__global__ void AddToOccTable(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, size_t pitch2);
float *simbase;
int *rankbase;
#define TRUE 1
#define FALSE 0
#define tablesize PACKET_SIZE
//int strCmp(char *str1, char *str2);
/* This is djb2 hashing algorithm by Dan Bernstien, from comp.lang.c*/
__device__ unsigned long computeHashCuda(char *str)
{
#if 1
unsigned long hash = 5381;
int c;
while (c = *str++)
hash = ((hash << 5) + hash) + c; // hash * 33 + c
return hash;
#else
unsigned long hash = 0;
int c;
int i = 0;
while (c = *str++)
{
hash = hash * i + c;
i++;
}
return hash;
#endif
}
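/* A minimal host-side sketch of the same djb2 recurrence, assuming it is wanted only
   for offline sanity checks of computeHashCuda; the helper name computeHashHostRef is
   hypothetical and is not referenced anywhere else in this file. */
static unsigned long computeHashHostRef(const char *str)
{
    unsigned long hash = 5381;           // djb2 starting value
    int c;
    while ((c = *str++))
        hash = ((hash << 5) + hash) + c; // hash * 33 + c, same recurrence as the device path
    return hash;
}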
// my stuff
__device__ void initHashTableCuda(MyHashMapElement *hme, int tablerange, int subrange)
{
MyHashMapElement *bucket = hme;
for (int i = 0; i != tablerange; i++)
{
bucket->countInBuc = 0;
/*dbg{
bucket->freq = 0; // TODO not necessary
bucket->key = 0xDEADBEAF;
bucket->tokenLength = 0;
bucket->subkey = 0;
for (int j = 0; j < subrange; j++)
{
(bucket+j)->countInBuc = 0;
(bucket+j)->freq = 0;
(bucket+j)->key = 0xDEADBEAF;
(bucket+j)->tokenLength = 0;
}
}*/
bucket += subrange;
}
}
__device__ bool insertElementCuda(MyHashMapElement *hme, unsigned long key, int keyshift, int bucketsize, int strlength, int initvalue)
{
unsigned long newkey = key & ( (1 << keyshift) - 1 ); // clear the MSBs
MyHashMapElement *bucket = &hme[newkey * bucketsize];
int numEleInBucket = bucket->countInBuc;
// search if the same element is in the bucket, if in, incr the frequency
for (int i = 0; i != numEleInBucket; i++)
{
if (bucket[i].key == key && bucket[i].tokenLength == strlength)
{
bucket[i].freq+=initvalue;
return true;
}
}
if (numEleInBucket == bucketsize) return false; // if bucket full, drop the element TODO
bucket[0].countInBuc++;
bucket[numEleInBucket].key = key;
bucket[numEleInBucket].freq = initvalue;
bucket[numEleInBucket].tokenLength = strlength;
dbg{
bucket[numEleInBucket].subkey = newkey;
bucket[numEleInBucket].countInBuc = numEleInBucket + 1;
}
return true;
// bucket[numEleInBucket].docIndex =
// bucket[numEleInBucket].tokenLength =
}
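/* A minimal host-side sketch of the bucket-index arithmetic used by insertElementCuda and
   findElementCuda, assuming the same low-bit masking scheme; the helper name bucketIndexRef
   is hypothetical and unused. */
static unsigned long bucketIndexRef(unsigned long key, int keyshift)
{
    // keep only the low `keyshift` bits of the hash, giving a bucket index in [0, 2^keyshift)
    return key & ((1UL << keyshift) - 1);
}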
__device__ int findElementCuda(MyHashMapElement *hme, unsigned long key, int keyshift, int bucketsize, int strlength)
{
unsigned long newkey = key & ( (1 << keyshift) - 1 ); // clear the MSBs
MyHashMapElement *bucket = &hme[newkey * bucketsize];
int numEleInBucket = bucket->countInBuc;
// search if the same element is in the bucket, if in, incr the frequency
for (int i = 0; i != numEleInBucket; i++)
{
if (bucket[i].key == key && bucket[i].tokenLength == strlength)
return bucket[i].freq;
}
return 0;
}
__constant__ char prefixes[][16]= { "kilo", "micro", "milli", "intra", "ultra", "mega", "nano", "pico", "pseudo"};
__constant__ char suffixes2[][2][16] = { { "ational", "ate" },
{ "tional", "tion" },
{ "enci", "ence" },
{ "anci", "ance" },
{ "izer", "ize" },
{ "iser", "ize" },
{ "abli", "able" },
{ "alli", "al" },
{ "entli", "ent" },
{ "eli", "e" },
{ "ousli", "ous" },
{ "ization", "ize" },
{ "isation", "ize" },
{ "ation", "ate" },
{ "ator", "ate" },
{ "alism", "al" },
{ "iveness", "ive" },
{ "fulness", "ful" },
{ "ousness", "ous" },
{ "aliti", "al" },
{ "iviti", "ive" },
{ "biliti", "ble" }};
__constant__ char suffixes3[][2][16] = { { "icate", "ic" },
{ "ative", "" },
{ "alize", "al" },
{ "alise", "al" },
{ "iciti", "ic" },
{ "ical", "ic" },
{ "ful", "" },
{ "ness", "" }};
__constant__ char suffixes4[][16] = { "al",
"ance",
"ence",
"er",
"ic",
"able", "ible", "ant", "ement", "ment", "ent", "sion", "tion",
"ou", "ism", "ate", "iti", "ous", "ive", "ize", "ise"};
__device__ bool step1(char *str ) {
char stem[32];
bool changed = false;
if ( str[strLen(str)-1] == 's' ) {
if ( (hasSuffix( str, "sses", stem ))
|| (hasSuffix( str, "ies", stem)) ){
str[strLen(str)-2] = '\0';
changed = true;
}
else {
if ( ( strLen(str) == 1 )
&& ( str[strLen(str)-1] == 's' ) ) {
str[0] = '\0';
return true;
}
if ( str[strLen(str)-2 ] != 's' ) {
str[strLen(str)-1] = '\0';
changed = true;
}
}
}
if ( hasSuffix( str,"eed",stem ) ) {
if ( measure( stem ) > 0 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
}
else {
if ( (hasSuffix( str,"ed",stem ))
|| (hasSuffix( str,"ing",stem )) ) {
if (containsVowel( stem )) {
if(stem[0]=='\0')
{
str[0]='\0';
changed = true;
}
else
{
str[strLen(stem)] = '\0';
changed = true;
}
if ( strLen(str) == 1 )
return changed;
if ( ( hasSuffix( str,"at",stem) )
|| ( hasSuffix( str,"bl",stem ) )
|| ( hasSuffix( str,"iz",stem) ) ) {
int len = strLen(str);
str[len-1] = 'e';
str[len] = '\0';
changed = true;
}
else {
int length = strLen(str);
if ( (str[length-1] == str[length-2])
&& (str[length-1] != 'l')
&& (str[length-1] != 's')
&& (str[length-1] != 'z') ) {
str[length-1]='\0';
changed = true;
}
else
if ( measure( str ) == 1 ) {
if ( cvc(str) )
{
str[length-1]='e';
str[length]='\0';
changed = true;
}
}
}
}
}
}
if ( hasSuffix(str,"y",stem) )
if ( containsVowel( stem ) ) {
int len = strLen(str);
str[len-1]='i';
str[len]='\0';
changed = true;
}
return changed;
}
__device__ bool step2( char *str ) {
char stem[32];
int last = sizeof(suffixes2)/(sizeof(char)*2*16); //strange way of calculating length of array
bool changed = false;
for ( int index = 0 ; index < last; index++ ) {
if ( hasSuffix ( str, suffixes2[index][0], stem ) ) {
if ( measure ( stem ) > 0 ) {
int stemlen, suffixlen, j;
stemlen = strLen(stem);
suffixlen = strLen(suffixes2[index][1]);
changed = true;
for(j=0; j<suffixlen; j++)
str[stemlen+j] = suffixes2[index][1][j];
str[stemlen+j] = '\0';
}
}
}
return changed;
}
__device__ bool step3( char *str ) {
char stem[32];
int last = sizeof(suffixes3)/(sizeof(char)*2*16); //strange way of calculating length of array/
bool changed= false;
for ( int index = 0 ; index<last; index++ ) {
if ( hasSuffix ( str, suffixes3[index][0], stem ))
if ( measure ( stem ) > 0 ) {
int stemlen, suffixlen, j;
stemlen = strLen(stem);
suffixlen = strLen(suffixes3[index][1]);
changed = true;
for( j=0; j<suffixlen; j++)
str[stemlen+j] = suffixes3[index][1][j];
str[stemlen+j] = '\0';
}
}
return changed ;
}
__device__ bool step4( char *str ) {
char stem[32];
int last = sizeof(suffixes4)/(sizeof(char)*16); //strange way of calculating length of array
bool changed = false;
for ( int index = 0 ; index<last; index++ ) {
if ( hasSuffix ( str, suffixes4[index], stem ) ) {
changed = true;
if ( measure ( stem ) > 1 ) {
str[strLen(stem)] = '\0';
}
}
}
return changed;
}
__device__ bool step5( char *str ) {
bool changed = false;
if ( str[strLen(str)-1] == 'e' ) {
if ( measure(str) > 1 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
else
if ( measure(str) == 1 ) {
char stem[32];
int i;
for ( i=0; i<strLen(str)-1; i++ )
stem[i] = str[i];
stem[i] = '\0';
if ( !cvc(stem) ){
str[strLen(str)-1] = '\0';
changed = true;
}
}
}
if ( strLen(str) == 1 )
return true;
if ( (str[strLen(str)-1] == 'l')
&& (str[strLen(str)-2] == 'l') && (measure(str) > 1) )
if ( measure(str) > 1 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
return changed;
}
__device__ bool stripSuffixes(char *str ) {
bool changed = false;
changed = step1( str );
if ( strLen(str) >= 1 )
changed |= step2( str );
if ( strLen(str) >= 1 )
changed |= step3( str );
if ( strLen(str) >= 1 )
changed |= step4( str );
if ( strLen(str) >= 1 )
changed |= step5( str );
return changed;
}
__device__ bool stripPrefixes ( char *str) {
int newLen, j;
bool found = false;
int last = sizeof(prefixes)/(sizeof(char)*16); //strange way of calculating length of array
for ( int i=0 ; i<last; i++ )
{
//Find if str starts with prefix prefixes[i]
found = prefixFind(str, prefixes[i]);
if (found)
{
newLen = strLen(str) - strLen(prefixes[i]);
for (j=0 ; j < newLen; j++ )
str[j] = str[j+strLen(prefixes[i])];
str[j] = '\0';
}
}
return found;
}
void StripAffixesWrapper(char *host_local, unsigned int *token_length_host, CalcFreqController *token_division_controller_host, int tokens_count, int docs_count, int g, int b)
{
//hipMalloc
hipMalloc(&device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END);
hipMalloc(&token_length_device, tokens_count*sizeof(unsigned int));
hipMalloc(&token_division_controller_device, docs_count * sizeof(CalcFreqController));
//cuda Mempcpy
hipMemcpy(device_local, host_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, hipMemcpyHostToDevice);
hipMemcpy(token_length_device, token_length_host, tokens_count*sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(token_division_controller_device, token_division_controller_host, docs_count * sizeof(CalcFreqController), hipMemcpyHostToDevice);
//kernel call
hipLaunchKernelGGL(( StripAffixes), dim3(g), dim3(b), 0, 0, device_local, token_length_device, token_division_controller_device, docs_count);
//cuda Memcpy
hipMemcpy(host_local, device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, hipMemcpyDeviceToHost);
hipMemcpy(token_length_host, token_length_device, tokens_count*sizeof(unsigned int), hipMemcpyDeviceToHost);
return;
}
__global__ void StripAffixes(char *dev_res, unsigned int *token_length, CalcFreqController *controller, int docs_count)
{
int numBags = MAX_THREADS;
int doc = blockIdx.x;
int tkn = threadIdx.x;
if(tkn < MAX_THREADS && doc < docs_count)
{
__shared__ char tokens[TOKEN_MAX_SIZE_PLUS_END * MAX_THREADS];
// adjust the token and token_length array pointer according to controller
char *base = &dev_res[controller[doc].doc_token_start * TOKEN_MAX_SIZE_PLUS_END];
unsigned int *token_length_base = &token_length[controller[doc].doc_token_start];
int tokens_count = controller[doc].doc_token_count;
int step_count = tokens_count/numBags;
int remain = tokens_count - step_count * numBags;
int index = tkn * TOKEN_MAX_SIZE_PLUS_END;
if (tkn < remain )
step_count += 1;
int *str;
int step_size = numBags * TOKEN_MAX_SIZE_PLUS_END;
int *token;
token = (int *)&tokens[TOKEN_MAX_SIZE_PLUS_END * tkn];
int ratio = sizeof(int)/sizeof(char);
for(int i=0; i< step_count; i++, index+=step_size)
{
int tokenLength = token_length_base[index/TOKEN_MAX_SIZE_PLUS_END]/ratio + 1;
str = (int *)&base[index];
// copy to shared memory first
for (int j = 0; j != tokenLength; j++)
token[j] = str[j];
bool changed = ToLowerCase( (char *)token);
changed |= Clean( (char *)token);
changed |= stripPrefixes((char *)token);
changed |= stripSuffixes((char *)token);
if (changed){
token_length_base[index/TOKEN_MAX_SIZE_PLUS_END] = strLen((char *)token);
strCpy(&base[index], (char *)token);
}
}
}
return;
}
void MakeDocHashWrapper( char *host_local, unsigned int *token_length_host, CalcFreqController *token_division_controller_host, MyHashMapElement **hash_doc_token_sub_tables, MyHashMapElement **hash_doc_token_tables,
int sub_table_size, int table_size, int docs_count, int g, int b, int maxRows, int i, int tokens_count)
{
if(i==0)
{
//hipMalloc
hipMallocPitch(&hash_doc_token_sub_tables_device, &pitch1, sub_table_size * sizeof(MyHashMapElement), MAX_GRID_SIZE);
hipMallocPitch(&hash_doc_token_tables_device, &pitch2, table_size * sizeof(MyHashMapElement), docs_count);
}
dbg printf("loop %d, pitch2 %d\n", i, pitch2);
//kernel call
hipLaunchKernelGGL(( MakeDocHash), dim3(g), dim3(b), 0, 0, device_local, token_length_device, &(token_division_controller_device[i]), hash_doc_token_sub_tables_device, (MyHashMapElement *)((char *)hash_doc_token_tables_device + i*pitch2), sub_table_size, table_size, maxRows, pitch1, pitch2);
hipLaunchKernelGGL(( MakeDocHash2), dim3(g), dim3(b), 0, 0, device_local, token_length_device, &(token_division_controller_device[i]), hash_doc_token_sub_tables_device, (MyHashMapElement *)((char *)hash_doc_token_tables_device + i*pitch2), sub_table_size, table_size, maxRows, pitch1, pitch2);
if(maxRows != 16)
{
//cuda Memcpy
hipMemcpy(host_local, device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, hipMemcpyDeviceToHost);
hipMemcpy(token_length_host, token_length_device, tokens_count * sizeof(unsigned int), hipMemcpyDeviceToHost);
}
}
__global__ void MakeDocHash(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_NUM_THREADS;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < HASH_DOC_TOKEN_NUM_THREADS && row < maxRows)
{
char *token_base = &dev_mem[controller[row].doc_token_start * TOKEN_MAX_SIZE_PLUS_END];
unsigned int *token_length_base = &token_length[controller[row].doc_token_start];
MyHashMapElement *hash_doc_token_sub_table;
hash_doc_token_sub_table = (MyHashMapElement *)((char*)hash_doc_token_sub_tables + row * pitch1) + (sub_table_size * col / HASH_DOC_TOKEN_NUM_THREADS);
MyHashMapElement *hash_doc_token_table;
hash_doc_token_table = (MyHashMapElement *)((char *) hash_doc_token_tables + row * pitch2);
{// clear the doc hash sub table in each thread
initHashTableCuda(hash_doc_token_sub_table, HASH_DOC_TOKEN_SUB_TABLE_SIZE, HASH_DOC_TOKEN_BUCKET_SUB_SIZE);
// clear the doc hash table
int bucketsPerThread = HASH_DOC_TOKEN_TABLE_SIZE / maxCols;//256/64 = 4
if (col < HASH_DOC_TOKEN_TABLE_SIZE % maxCols)
bucketsPerThread += 1;
MyHashMapElement *bucket = (MyHashMapElement *)hash_doc_token_table + col * HASH_DOC_TOKEN_BUCKET_SIZE;
for (int i = 0; i != bucketsPerThread; i++)
{
bucket->countInBuc = 0;
dbg{
bucket->key = 0xDEADBEEF;
bucket->subkey = 0;
bucket->freq = 0;
bucket->tokenLength = 0;
for (int j = 1; j != HASH_DOC_TOKEN_BUCKET_SIZE; j++)
{
(bucket+j)->countInBuc = 0;
(bucket+j)->freq = j;
(bucket+j)->subkey = 0;
(bucket+j)->key = 0xDEADBEAF;
(bucket+j)->tokenLength = 0;
}
}
bucket += maxCols * HASH_DOC_TOKEN_BUCKET_SIZE;
}
}
int tokens_count = controller[row].doc_token_count;
int step_count = tokens_count/maxCols;
int remain = tokens_count - step_count * maxCols;
int index = col * TOKEN_MAX_SIZE_PLUS_END;
if (col < remain )
step_count += 1;
// int *str;
int step_size = maxCols * TOKEN_MAX_SIZE_PLUS_END;
for(int i=0; i< step_count; i++, index+=step_size)
{
unsigned long key = computeHashCuda(&token_base[index]);
insertElementCuda(hash_doc_token_sub_table, key, HASH_DOC_TOKEN_SUB_TABLE_SIZE_LOG2, HASH_DOC_TOKEN_BUCKET_SUB_SIZE, token_length_base[index/TOKEN_MAX_SIZE_PLUS_END], 1);
}
// dbg printf("Done %d,%d\n",row,col);
}
return;
}
__global__ void MakeDocHash2(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2)
{
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < HASH_DOC_TOKEN_NUM_THREADS && row < maxRows)
{
MyHashMapElement *hash_doc_token_sub_table;
hash_doc_token_sub_table = (MyHashMapElement *)((char*) hash_doc_token_sub_tables + row * pitch1);
__shared__ MyHashMapElement *hash_doc_token_table;
hash_doc_token_table = (MyHashMapElement *)((char*) hash_doc_token_tables + row * pitch2);
hash_doc_token_sub_table += (sub_table_size * col / HASH_DOC_TOKEN_NUM_THREADS);
// merge sub tables into one doc hash table
hash_doc_token_sub_table = (MyHashMapElement *)((char*) hash_doc_token_sub_tables + row * pitch1);
hash_doc_token_sub_table += (col * HASH_DOC_TOKEN_BUCKET_SUB_SIZE);
for (int i = 0; i != HASH_DOC_TOKEN_NUM_THREADS; i++)
{
MyHashMapElement *bucket = hash_doc_token_sub_table;
int numInBucket = bucket->countInBuc;
while(numInBucket--)
{
unsigned long key = bucket->key;
insertElementCuda(hash_doc_token_table, key, HASH_DOC_TOKEN_TABLE_SIZE_LOG2, HASH_DOC_TOKEN_BUCKET_SIZE, bucket->tokenLength, bucket->freq);
bucket++;
}
hash_doc_token_sub_table += HASH_DOC_TOKEN_SUB_TABLE_SIZE * HASH_DOC_TOKEN_BUCKET_SUB_SIZE;
}
}
return;
}
__global__ void InitOccTable(MyHashMapElement *occ_hash_table)
{
int maxCols = 32;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < maxCols && row < HASH_DOC_TOKEN_TABLE_SIZE/32)
{
MyHashMapElement *bucket = &occ_hash_table[((row * maxCols ) + col) * OCC_HASH_TABLE_BUCKET_SIZE];
bucket->countInBuc = 0;
dbg{
bucket->key = 0xDEADBEEF;
bucket->freq = 0;
bucket->tokenLength = 0;
bucket->subkey = 0;
for (int j = 1; j < OCC_HASH_TABLE_BUCKET_SIZE; j++)
{
bucket[j].countInBuc = 0;
bucket[j].key = 0xDEADBEEF;
bucket[j].freq = 0;
bucket[j].tokenLength = 0;
bucket[j].subkey = 0;
}
}
}
}
void AddToOccTableWrapper(MyHashMapElement **hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, int occ_table_size, int g, int b, int table_size)
{
//hipMalloc
hipMalloc(&occ_hash_table_device, occ_table_size * sizeof(MyHashMapElement));
//hipMemcpy
hipMemcpy(occ_hash_table_device, occ_hash_table, occ_table_size * sizeof(MyHashMapElement), hipMemcpyHostToDevice);
//kernel call
hipLaunchKernelGGL(( InitOccTable), dim3(g), dim3(b), 0, 0, occ_hash_table_device);
hipLaunchKernelGGL(( AddToOccTable), dim3(g), dim3(b), 0, 0, hash_doc_token_tables_device, occ_hash_table_device, numDocs, pitch2);
//hipMemcpy
hipMemcpy(occ_hash_table, occ_hash_table_device, occ_table_size * sizeof(MyHashMapElement),hipMemcpyDeviceToHost);
}
__global__ void AddToOccTable(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, size_t pitch2)
{
int maxCols = 32;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < maxCols && row < HASH_DOC_TOKEN_TABLE_SIZE/32)
{
for (int i = 0; i != numDocs; i++)
{
MyHashMapElement *hash_doc_token_table = (MyHashMapElement *)((char*)hash_doc_token_tables + i * pitch2);
MyHashMapElement *bucket = &hash_doc_token_table[(row * maxCols + col) * HASH_DOC_TOKEN_BUCKET_SIZE];
int numInBucket = bucket->countInBuc;
while (numInBucket--)
{
unsigned long key = bucket->key;
insertElementCuda(occ_hash_table, key, OCC_HASH_TABLE_SIZE_LOG2, OCC_HASH_TABLE_BUCKET_SIZE, bucket->tokenLength, 1);
bucket++;
}
}
}
}
__global__ void CalcTfIdf(CalcFreqController *controller, MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int docs_count, float *bucket_sqrt_sum, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(row < docs_count && col < maxCols)
{
int token_doc_count = controller[row].doc_token_count;
// 1. calculate the un-normalized tfidf
MyHashMapElement *bucket = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
bucket += col * HASH_DOC_TOKEN_BUCKET_SIZE;
int numInBucket = bucket->countInBuc;
__shared__ float bucketSqrtSum[HASH_DOC_TOKEN_TABLE_SIZE];
bucketSqrtSum[col] = 0.0f;
while (numInBucket--)
{
unsigned long key = bucket->key;
int occ = findElementCuda(occ_hash_table, key, OCC_HASH_TABLE_SIZE_LOG2, OCC_HASH_TABLE_BUCKET_SIZE, bucket->tokenLength);
if (occ != 0) // we should be able to find it in the occ table
{
float tf = (float)bucket->freq/token_doc_count;
float idf = log(float(docs_count)/occ);
bucket->tfidf = tf * idf;
bucketSqrtSum[col] += bucket->tfidf * bucket->tfidf;
dbg {
bucket->subkey = occ;
}
}
bucket++;
}
__syncthreads();
if(col == 0)
{
float sum = 0.0f;
for(int i = 0; i < maxCols; i++)
sum += bucketSqrtSum[i];
bucket_sqrt_sum[row] = sqrt(sum);
}
}
}
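/* A minimal host-side sketch of the per-term weight computed above, assuming the same
   definitions: tf = freq / tokens_in_doc and idf = log(docs_count / occ). The helper name
   tfidfHostRef is hypothetical and is not called in this file. */
static float tfidfHostRef(int freq, int tokens_in_doc, int docs_count, int occ)
{
    float tf  = (float)freq / tokens_in_doc;   // term frequency within the document
    float idf = logf((float)docs_count / occ); // inverse document frequency
    return tf * idf;                           // un-normalized weight, as in CalcTfIdf
}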
__global__ void CalcTfIdf2(CalcFreqController *controller, MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int docs_count, float *bucket_sqrt_sum, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(row < docs_count && col < maxCols)
{
// pthread_barrier_wait();
// normalize
float magnitude = bucket_sqrt_sum[row];
MyHashMapElement *bucket = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
bucket += col * HASH_DOC_TOKEN_BUCKET_SIZE;
int numInBucket = bucket->countInBuc;
while (numInBucket--)
{
float tfidf = (float)bucket->tfidf;
tfidf = tfidf / magnitude;
bucket->tfidf = tfidf;
bucket++;
}
}
}
void CalcTfidfWrapper(CalcFreqController *token_division_controller_host, MyHashMapElement **hash_doc_token_tables_host, MyHashMapElement *occ_hash_table_remote, int docs_count, float *bucket_sqrt_sum, int g, int b, int table_size)
{
//hipMalloc
hipMalloc(&bucket_sqrt_sum_device, HASH_DOC_TOKEN_TABLE_SIZE * sizeof(float));
//kernel calls
hipLaunchKernelGGL(( CalcTfIdf), dim3(g), dim3(b), 0, 0, token_division_controller_device, hash_doc_token_tables_device, occ_hash_table_device, docs_count, bucket_sqrt_sum_device, pitch2);
hipLaunchKernelGGL(( CalcTfIdf2), dim3(g), dim3(b), 0, 0, token_division_controller_device, hash_doc_token_tables_device, occ_hash_table_device, docs_count, bucket_sqrt_sum_device, pitch2);
//hipMemcpy
for(int j=0; j< docs_count;j++)
hipMemcpy(hash_doc_token_tables_host[j], (MyHashMapElement *)((char*)hash_doc_token_tables_device + j * pitch2), table_size * sizeof(MyHashMapElement), hipMemcpyDeviceToHost);
}
__global__ void CalcSimilarities(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table_remote, float *similarity_matrix, int docs_count, size_t pitch2)
{
int col = threadIdx.x;
int row = blockIdx.x;
int row2 = blockIdx.y;
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
if(col < HASH_DOC_TOKEN_TABLE_SIZE && row < docs_count && row2 < docs_count)
{
MyHashMapElement *hashDoc_token_table1 = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
MyHashMapElement *hashDoc_token_table2 = (MyHashMapElement *)((char *)hash_doc_token_tables + row2 * pitch2);
__shared__ float sim_sum[HASH_DOC_TOKEN_TABLE_SIZE];
sim_sum[col] = 0.0f;
MyHashMapElement *bucket1 = hashDoc_token_table1 + col * HASH_DOC_TOKEN_BUCKET_SIZE;
int num_ele_1 = bucket1->countInBuc;
while (num_ele_1--)
{
MyHashMapElement *bucket2 = hashDoc_token_table2 + col * HASH_DOC_TOKEN_BUCKET_SIZE;
int num_ele_2 = bucket2->countInBuc;
int find = 0;
while (num_ele_2--)
{
if ((bucket2->key == bucket1->key) && (bucket2->tokenLength == bucket1->tokenLength))
{
find = 1;
break;
}
bucket2++;
}
if (find)
sim_sum[col] += bucket1->tfidf * bucket2->tfidf;
bucket1++;
}
__syncthreads();
if(col == 0)
{
float sum = 0.0f;
for(int i = 0; i < maxCols; i++)
sum += sim_sum[i];
similarity_matrix[docs_count * row + row2] = sum;
}
}
}
void CalcSimilaritiesWrapper(MyHashMapElement **hash_doc_token_tables_host, MyHashMapElement *occ_hash_table_remote, float *doc_similarity_matrix_host, int docs_count, int g, int b)
{
//hipMalloc
hipMalloc(&doc_similarity_matrix_device, docs_count * docs_count * sizeof(float));
dim3 threadsPerBlock(b, b);
dim3 numBlocks(g/2,g/2);
//kernel calls
hipLaunchKernelGGL(( CalcSimilarities), dim3(numBlocks), dim3(b), 0, 0, hash_doc_token_tables_device, occ_hash_table_device, doc_similarity_matrix_device, docs_count, pitch2);
//hipMemcpy
hipMemcpy(doc_similarity_matrix_host, doc_similarity_matrix_device, docs_count * docs_count * sizeof(float),hipMemcpyDeviceToHost);
}
__global__ void SortSimilarities2(float *similarity_matrix, int *rank_matrix, int docs_count, float *simbase, int *rankbase)
{
int col = threadIdx.x;
int row = blockIdx.x;
if(col < docs_count && row < docs_count)
{
simbase = (float *)similarity_matrix+row*docs_count;
rankbase = (int *)rank_matrix+row * docs_count;
float my_value = *((float *)simbase+col);
int myRank = 0;
for (int i = 0; i != docs_count; i++)
{
if (i == col)
continue;
if (*((float *)simbase+i) > my_value)
myRank = myRank + 1;
}
*((int *)rankbase+col) = myRank;
}
}
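/* A minimal host-side sketch of the ranking rule used in SortSimilarities2, assuming
   rank = number of other documents in the same row with a strictly larger similarity;
   the helper name rankOfRef is hypothetical and is not called anywhere in this file. */
static int rankOfRef(const float *sims, int docs_count, int col)
{
    int rank = 0;
    for (int i = 0; i < docs_count; i++)
    {
        if (i == col) continue;          // skip the document itself
        if (sims[i] > sims[col]) rank++; // one more document ranks ahead of it
    }
    return rank;
}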
void SortSimilarities2Wrapper(float *doc_similarity_matrix_host, int *doc_rank_matrix_host, int docs_count, int g, int b)
{
//hipMalloc
hipMalloc(&doc_rank_matrix_device, docs_count * docs_count * sizeof(int));
hipMalloc(&simbase, docs_count*sizeof(float));
hipMalloc(&rankbase, docs_count*sizeof(int));
//kernel call
hipLaunchKernelGGL(( SortSimilarities2), dim3(g), dim3(b), 0, 0, doc_similarity_matrix_device, doc_rank_matrix_device, docs_count, simbase, rankbase);
//hipMemcpy
hipMemcpy(doc_rank_matrix_host, doc_rank_matrix_device, docs_count * docs_count * sizeof(int), hipMemcpyDeviceToHost);
//hipFree
hipFree(&doc_rank_matrix_device);
hipFree(&doc_similarity_matrix_device);
hipFree(&hash_doc_token_tables_device);
hipFree(&occ_hash_table_device);
hipFree(&token_division_controller_device);
hipFree(&bucket_sqrt_sum_device);
hipFree(device_local);
hipFree(token_length_device);
hipFree(hash_doc_token_sub_tables_device);
}
|
015b731b3b73aac1f2745954601073e04e476af7.cu
|
/*BSD License
Copyright © belongs to the uploader, all rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, with the name of the uploader, and this list of conditions;
Redistributions in binary form must reproduce the above copyright notice, with the name of the uploader, and this list of conditions in the documentation and/or other materials provided with the distribution;
Neither the name of the uploader nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
*/
#include <stdio.h>
#include <math.h>
#include "string_funcs.cu"
//#include "hash_funcs.cpp"
#include "defs.h"
//#include "hash_funcs.h"
//#include "cuda_k.h"
#include "hash_funcs_cuda.h"
char *device_local;
unsigned int *token_length_device;
CalcFreqController *token_division_controller_device;
MyHashMapElement *hash_doc_token_sub_tables_device;
MyHashMapElement *hash_doc_token_tables_device;
MyHashMapElement *occ_hash_table_device;
float *bucket_sqrt_sum_device;
float *doc_similarity_matrix_device;
int *doc_rank_matrix_device;
__device__ bool stripPrefixes ( char *str);
__global__ void StripAffixes(char *dev_res, unsigned int *token_length, CalcFreqController *controller, int docs_count);
__global__ void MakeDocHash2(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2);
__global__ void MakeDocHash(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2);
size_t pitch1;
size_t pitch2;
__global__ void AddToOccTable(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, size_t pitch2);
float *simbase;
int *rankbase;
#define TRUE 1
#define FALSE 0
#define tablesize PACKET_SIZE
//int strCmp(char *str1, char *str2);
/* This is djb2 hashing algorithm by Dan Bernstien, from comp.lang.c*/
__device__ unsigned long computeHashCuda(char *str)
{
#if 1
unsigned long hash = 5381;
int c;
while (c = *str++)
hash = ((hash << 5) + hash) + c; // hash * 33 + c
return hash;
#else
unsigned long hash = 0;
int c;
int i = 0;
while (c = *str++)
{
hash = hash * i + c;
i++;
}
return hash;
#endif
}
// my stuff
__device__ void initHashTableCuda(MyHashMapElement *hme, int tablerange, int subrange)
{
MyHashMapElement *bucket = hme;
for (int i = 0; i != tablerange; i++)
{
bucket->countInBuc = 0;
/*dbg{
bucket->freq = 0; // TODO not necessary
bucket->key = 0xDEADBEAF;
bucket->tokenLength = 0;
bucket->subkey = 0;
for (int j = 0; j < subrange; j++)
{
(bucket+j)->countInBuc = 0;
(bucket+j)->freq = 0;
(bucket+j)->key = 0xDEADBEAF;
(bucket+j)->tokenLength = 0;
}
}*/
bucket += subrange;
}
}
__device__ bool insertElementCuda(MyHashMapElement *hme, unsigned long key, int keyshift, int bucketsize, int strlength, int initvalue)
{
unsigned long newkey = key & ( (1 << keyshift) - 1 ); // clear the MSBs
MyHashMapElement *bucket = &hme[newkey * bucketsize];
int numEleInBucket = bucket->countInBuc;
// search if the same element is in the bucket, if in, incr the frequency
for (int i = 0; i != numEleInBucket; i++)
{
if (bucket[i].key == key && bucket[i].tokenLength == strlength)
{
bucket[i].freq+=initvalue;
return true;
}
}
if (numEleInBucket == bucketsize) return false; // if bucket full, drop the element TODO
bucket[0].countInBuc++;
bucket[numEleInBucket].key = key;
bucket[numEleInBucket].freq = initvalue;
bucket[numEleInBucket].tokenLength = strlength;
dbg{
bucket[numEleInBucket].subkey = newkey;
bucket[numEleInBucket].countInBuc = numEleInBucket + 1;
}
return true;
// bucket[numEleInBucket].docIndex =
// bucket[numEleInBucket].tokenLength =
}
__device__ int findElementCuda(MyHashMapElement *hme, unsigned long key, int keyshift, int bucketsize, int strlength)
{
unsigned long newkey = key & ( (1 << keyshift) - 1 ); // clear the MSBs
MyHashMapElement *bucket = &hme[newkey * bucketsize];
int numEleInBucket = bucket->countInBuc;
// search if the same element is in the bucket, if in, incr the frequency
for (int i = 0; i != numEleInBucket; i++)
{
if (bucket[i].key == key && bucket[i].tokenLength == strlength)
return bucket[i].freq;
}
return 0;
}
__constant__ char prefixes[][16]= { "kilo", "micro", "milli", "intra", "ultra", "mega", "nano", "pico", "pseudo"};
__constant__ char suffixes2[][2][16] = { { "ational", "ate" },
{ "tional", "tion" },
{ "enci", "ence" },
{ "anci", "ance" },
{ "izer", "ize" },
{ "iser", "ize" },
{ "abli", "able" },
{ "alli", "al" },
{ "entli", "ent" },
{ "eli", "e" },
{ "ousli", "ous" },
{ "ization", "ize" },
{ "isation", "ize" },
{ "ation", "ate" },
{ "ator", "ate" },
{ "alism", "al" },
{ "iveness", "ive" },
{ "fulness", "ful" },
{ "ousness", "ous" },
{ "aliti", "al" },
{ "iviti", "ive" },
{ "biliti", "ble" }};
__constant__ char suffixes3[][2][16] = { { "icate", "ic" },
{ "ative", "" },
{ "alize", "al" },
{ "alise", "al" },
{ "iciti", "ic" },
{ "ical", "ic" },
{ "ful", "" },
{ "ness", "" }};
__constant__ char suffixes4[][16] = { "al",
"ance",
"ence",
"er",
"ic",
"able", "ible", "ant", "ement", "ment", "ent", "sion", "tion",
"ou", "ism", "ate", "iti", "ous", "ive", "ize", "ise"};
__device__ bool step1(char *str ) {
char stem[32];
bool changed = false;
if ( str[strLen(str)-1] == 's' ) {
if ( (hasSuffix( str, "sses", stem ))
|| (hasSuffix( str, "ies", stem)) ){
str[strLen(str)-2] = '\0';
changed = true;
}
else {
if ( ( strLen(str) == 1 )
&& ( str[strLen(str)-1] == 's' ) ) {
str[0] = '\0';
return true;
}
if ( str[strLen(str)-2 ] != 's' ) {
str[strLen(str)-1] = '\0';
changed = true;
}
}
}
if ( hasSuffix( str,"eed",stem ) ) {
if ( measure( stem ) > 0 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
}
else {
if ( (hasSuffix( str,"ed",stem ))
|| (hasSuffix( str,"ing",stem )) ) {
if (containsVowel( stem )) {
if(stem[0]=='\0')
{
str[0]='\0';
changed = true;
}
else
{
str[strLen(stem)] = '\0';
changed = true;
}
if ( strLen(str) == 1 )
return changed;
if ( ( hasSuffix( str,"at",stem) )
|| ( hasSuffix( str,"bl",stem ) )
|| ( hasSuffix( str,"iz",stem) ) ) {
int len = strLen(str);
str[len-1] = 'e';
str[len] = '\0';
changed = true;
}
else {
int length = strLen(str);
if ( (str[length-1] == str[length-2])
&& (str[length-1] != 'l')
&& (str[length-1] != 's')
&& (str[length-1] != 'z') ) {
str[length-1]='\0';
changed = true;
}
else
if ( measure( str ) == 1 ) {
if ( cvc(str) )
{
str[length-1]='e';
str[length]='\0';
changed = true;
}
}
}
}
}
}
if ( hasSuffix(str,"y",stem) )
if ( containsVowel( stem ) ) {
int len = strLen(str);
str[len-1]='i';
str[len]='\0';
changed = true;
}
return changed;
}
__device__ bool step2( char *str ) {
char stem[32];
int last = sizeof(suffixes2)/(sizeof(char)*2*16); //strange way of calculating length of array
bool changed = false;
for ( int index = 0 ; index < last; index++ ) {
if ( hasSuffix ( str, suffixes2[index][0], stem ) ) {
if ( measure ( stem ) > 0 ) {
int stemlen, suffixlen, j;
stemlen = strLen(stem);
suffixlen = strLen(suffixes2[index][1]);
changed = true;
for(j=0; j<suffixlen; j++)
str[stemlen+j] = suffixes2[index][1][j];
str[stemlen+j] = '\0';
}
}
}
return changed;
}
__device__ bool step3( char *str ) {
char stem[32];
int last = sizeof(suffixes3)/(sizeof(char)*2*16); //strange way of calculating length of array/
bool changed= false;
for ( int index = 0 ; index<last; index++ ) {
if ( hasSuffix ( str, suffixes3[index][0], stem ))
if ( measure ( stem ) > 0 ) {
int stemlen, suffixlen, j;
stemlen = strLen(stem);
suffixlen = strLen(suffixes3[index][1]);
changed = true;
for( j=0; j<suffixlen; j++)
str[stemlen+j] = suffixes3[index][1][j];
str[stemlen+j] = '\0';
}
}
return changed ;
}
__device__ bool step4( char *str ) {
char stem[32];
int last = sizeof(suffixes4)/(sizeof(char)*16); //strange way of calculating length of array
bool changed = false;
for ( int index = 0 ; index<last; index++ ) {
if ( hasSuffix ( str, suffixes4[index], stem ) ) {
changed = true;
if ( measure ( stem ) > 1 ) {
str[strLen(stem)] = '\0';
}
}
}
return changed;
}
__device__ bool step5( char *str ) {
bool changed = false;
if ( str[strLen(str)-1] == 'e' ) {
if ( measure(str) > 1 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
else
if ( measure(str) == 1 ) {
char stem[32];
int i;
for ( i=0; i<strLen(str)-1; i++ )
stem[i] = str[i];
stem[i] = '\0';
if ( !cvc(stem) ){
str[strLen(str)-1] = '\0';
changed = true;
}
}
}
if ( strLen(str) == 1 )
return true;
if ( (str[strLen(str)-1] == 'l')
&& (str[strLen(str)-2] == 'l') && (measure(str) > 1) )
if ( measure(str) > 1 ) {
str[strLen(str)-1] = '\0';
changed = true;
}
return changed;
}
__device__ bool stripSuffixes(char *str ) {
bool changed = false;
changed = step1( str );
if ( strLen(str) >= 1 )
changed |= step2( str );
if ( strLen(str) >= 1 )
changed |= step3( str );
if ( strLen(str) >= 1 )
changed |= step4( str );
if ( strLen(str) >= 1 )
changed |= step5( str );
return changed;
}
__device__ bool stripPrefixes ( char *str) {
int newLen, j;
bool found = false;
int last = sizeof(prefixes)/(sizeof(char)*16); //strange way of calculating length of array
for ( int i=0 ; i<last; i++ )
{
//Find if str starts with prefix prefixes[i]
found = prefixFind(str, prefixes[i]);
if (found)
{
newLen = strLen(str) - strLen(prefixes[i]);
for (j=0 ; j < newLen; j++ )
str[j] = str[j+strLen(prefixes[i])];
str[j] = '\0';
}
}
return found;
}
void StripAffixesWrapper(char *host_local, unsigned int *token_length_host, CalcFreqController *token_division_controller_host, int tokens_count, int docs_count, int g, int b)
{
//cudaMalloc
cudaMalloc(&device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END);
cudaMalloc(&token_length_device, tokens_count*sizeof(unsigned int));
cudaMalloc(&token_division_controller_device, docs_count * sizeof(CalcFreqController));
//cuda Mempcpy
cudaMemcpy(device_local, host_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, cudaMemcpyHostToDevice);
cudaMemcpy(token_length_device, token_length_host, tokens_count*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(token_division_controller_device, token_division_controller_host, docs_count * sizeof(CalcFreqController), cudaMemcpyHostToDevice);
//kernel call
StripAffixes<<<g, b>>>(device_local, token_length_device, token_division_controller_device, docs_count);
//cuda Memcpy
cudaMemcpy(host_local, device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, cudaMemcpyDeviceToHost);
cudaMemcpy(token_length_host, token_length_device, tokens_count*sizeof(unsigned int), cudaMemcpyDeviceToHost);
return;
}
__global__ void StripAffixes(char *dev_res, unsigned int *token_length, CalcFreqController *controller, int docs_count)
{
int numBags = MAX_THREADS;
int doc = blockIdx.x;
int tkn = threadIdx.x;
if(tkn < MAX_THREADS && doc < docs_count)
{
__shared__ char tokens[TOKEN_MAX_SIZE_PLUS_END * MAX_THREADS];
// adjust the token and token_length array pointer according to controller
char *base = &dev_res[controller[doc].doc_token_start * TOKEN_MAX_SIZE_PLUS_END];
unsigned int *token_length_base = &token_length[controller[doc].doc_token_start];
int tokens_count = controller[doc].doc_token_count;
int step_count = tokens_count/numBags;
int remain = tokens_count - step_count * numBags;
int index = tkn * TOKEN_MAX_SIZE_PLUS_END;
if (tkn < remain )
step_count += 1;
int *str;
int step_size = numBags * TOKEN_MAX_SIZE_PLUS_END;
int *token;
token = (int *)&tokens[TOKEN_MAX_SIZE_PLUS_END * tkn];
int ratio = sizeof(int)/sizeof(char);
for(int i=0; i< step_count; i++, index+=step_size)
{
int tokenLength = token_length_base[index/TOKEN_MAX_SIZE_PLUS_END]/ratio + 1;
str = (int *)&base[index];
// copy to shared memory first
for (int j = 0; j != tokenLength; j++)
token[j] = str[j];
bool changed = ToLowerCase( (char *)token);
changed |= Clean( (char *)token);
changed |= stripPrefixes((char *)token);
changed |= stripSuffixes((char *)token);
if (changed){
token_length_base[index/TOKEN_MAX_SIZE_PLUS_END] = strLen((char *)token);
strCpy(&base[index], (char *)token);
}
}
}
return;
}
void MakeDocHashWrapper( char *host_local, unsigned int *token_length_host, CalcFreqController *token_division_controller_host, MyHashMapElement **hash_doc_token_sub_tables, MyHashMapElement **hash_doc_token_tables,
int sub_table_size, int table_size, int docs_count, int g, int b, int maxRows, int i, int tokens_count)
{
if(i==0)
{
//cudaMalloc
cudaMallocPitch(&hash_doc_token_sub_tables_device, &pitch1, sub_table_size * sizeof(MyHashMapElement), MAX_GRID_SIZE);
cudaMallocPitch(&hash_doc_token_tables_device, &pitch2, table_size * sizeof(MyHashMapElement), docs_count);
}
dbg printf("loop %d, pitch2 %d\n", i, pitch2);
//kernel call
MakeDocHash<<<g, b>>>(device_local, token_length_device, &(token_division_controller_device[i]), hash_doc_token_sub_tables_device, (MyHashMapElement *)((char *)hash_doc_token_tables_device + i*pitch2), sub_table_size, table_size, maxRows, pitch1, pitch2);
MakeDocHash2<<<g, b>>>(device_local, token_length_device, &(token_division_controller_device[i]), hash_doc_token_sub_tables_device, (MyHashMapElement *)((char *)hash_doc_token_tables_device + i*pitch2), sub_table_size, table_size, maxRows, pitch1, pitch2);
if(maxRows != 16)
{
//cuda Memcpy
cudaMemcpy(host_local, device_local, tokens_count * sizeof(char) * TOKEN_MAX_SIZE_PLUS_END, cudaMemcpyDeviceToHost);
cudaMemcpy(token_length_host, token_length_device, tokens_count * sizeof(unsigned int), cudaMemcpyDeviceToHost);
}
}
__global__ void MakeDocHash(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_NUM_THREADS;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < HASH_DOC_TOKEN_NUM_THREADS && row < maxRows)
{
char *token_base = &dev_mem[controller[row].doc_token_start * TOKEN_MAX_SIZE_PLUS_END];
unsigned int *token_length_base = &token_length[controller[row].doc_token_start];
MyHashMapElement *hash_doc_token_sub_table;
hash_doc_token_sub_table = (MyHashMapElement *)((char*)hash_doc_token_sub_tables + row * pitch1) + (sub_table_size * col / HASH_DOC_TOKEN_NUM_THREADS);
MyHashMapElement *hash_doc_token_table;
hash_doc_token_table = (MyHashMapElement *)((char *) hash_doc_token_tables + row * pitch2);
{// clear the doc hash sub table in each thread
initHashTableCuda(hash_doc_token_sub_table, HASH_DOC_TOKEN_SUB_TABLE_SIZE, HASH_DOC_TOKEN_BUCKET_SUB_SIZE);
// clear the doc hash table
int bucketsPerThread = HASH_DOC_TOKEN_TABLE_SIZE / maxCols;//256/64 = 4
if (col < HASH_DOC_TOKEN_TABLE_SIZE % maxCols)
bucketsPerThread += 1;
MyHashMapElement *bucket = (MyHashMapElement *)hash_doc_token_table + col * HASH_DOC_TOKEN_BUCKET_SIZE;
for (int i = 0; i != bucketsPerThread; i++)
{
bucket->countInBuc = 0;
dbg{
bucket->key = 0xDEADBEEF;
bucket->subkey = 0;
bucket->freq = 0;
bucket->tokenLength = 0;
for (int j = 1; j != HASH_DOC_TOKEN_BUCKET_SIZE; j++)
{
(bucket+j)->countInBuc = 0;
(bucket+j)->freq = j;
(bucket+j)->subkey = 0;
(bucket+j)->key = 0xDEADBEAF;
(bucket+j)->tokenLength = 0;
}
}
bucket += maxCols * HASH_DOC_TOKEN_BUCKET_SIZE;
}
}
int tokens_count = controller[row].doc_token_count;
int step_count = tokens_count/maxCols;
int remain = tokens_count - step_count * maxCols;
int index = col * TOKEN_MAX_SIZE_PLUS_END;
if (col < remain )
step_count += 1;
// int *str;
int step_size = maxCols * TOKEN_MAX_SIZE_PLUS_END;
for(int i=0; i< step_count; i++, index+=step_size)
{
unsigned long key = computeHashCuda(&token_base[index]);
insertElementCuda(hash_doc_token_sub_table, key, HASH_DOC_TOKEN_SUB_TABLE_SIZE_LOG2, HASH_DOC_TOKEN_BUCKET_SUB_SIZE, token_length_base[index/TOKEN_MAX_SIZE_PLUS_END], 1);
}
// dbg printf("Done %d,%d\n",row,col);
}
return;
}
__global__ void MakeDocHash2(char *dev_mem, unsigned int *token_length, CalcFreqController *controller,
MyHashMapElement *hash_doc_token_sub_tables, MyHashMapElement *hash_doc_token_tables, int sub_table_size, int table_size, int maxRows, size_t pitch1, size_t pitch2)
{
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < HASH_DOC_TOKEN_NUM_THREADS && row < maxRows)
{
MyHashMapElement *hash_doc_token_sub_table;
hash_doc_token_sub_table = (MyHashMapElement *)((char*) hash_doc_token_sub_tables + row * pitch1);
__shared__ MyHashMapElement *hash_doc_token_table;
hash_doc_token_table = (MyHashMapElement *)((char*) hash_doc_token_tables + row * pitch2);
hash_doc_token_sub_table += (sub_table_size * col / HASH_DOC_TOKEN_NUM_THREADS);
// merge sub tables into one doc hash table
hash_doc_token_sub_table = (MyHashMapElement *)((char*) hash_doc_token_sub_tables + row * pitch1);
hash_doc_token_sub_table += (col * HASH_DOC_TOKEN_BUCKET_SUB_SIZE);
for (int i = 0; i != HASH_DOC_TOKEN_NUM_THREADS; i++)
{
MyHashMapElement *bucket = hash_doc_token_sub_table;
int numInBucket = bucket->countInBuc;
while(numInBucket--)
{
unsigned long key = bucket->key;
insertElementCuda(hash_doc_token_table, key, HASH_DOC_TOKEN_TABLE_SIZE_LOG2, HASH_DOC_TOKEN_BUCKET_SIZE, bucket->tokenLength, bucket->freq);
bucket++;
}
hash_doc_token_sub_table += HASH_DOC_TOKEN_SUB_TABLE_SIZE * HASH_DOC_TOKEN_BUCKET_SUB_SIZE;
}
}
return;
}
__global__ void InitOccTable(MyHashMapElement *occ_hash_table)
{
int maxCols = 32;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < maxCols && row < HASH_DOC_TOKEN_TABLE_SIZE/32)
{
MyHashMapElement *bucket = &occ_hash_table[((row * maxCols ) + col) * OCC_HASH_TABLE_BUCKET_SIZE];
bucket->countInBuc = 0;
dbg{
bucket->key = 0xDEADBEEF;
bucket->freq = 0;
bucket->tokenLength = 0;
bucket->subkey = 0;
for (int j = 1; j < OCC_HASH_TABLE_BUCKET_SIZE; j++)
{
bucket[j].countInBuc = 0;
bucket[j].key = 0xDEADBEEF;
bucket[j].freq = 0;
bucket[j].tokenLength = 0;
bucket[j].subkey = 0;
}
}
}
}
void AddToOccTableWrapper(MyHashMapElement **hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, int occ_table_size, int g, int b, int table_size)
{
//cudaMalloc
cudaMalloc(&occ_hash_table_device, occ_table_size * sizeof(MyHashMapElement));
//cudaMemcpy
cudaMemcpy(occ_hash_table_device, occ_hash_table, occ_table_size * sizeof(MyHashMapElement), cudaMemcpyHostToDevice);
//kernel call
InitOccTable<<<g, b>>>(occ_hash_table_device);
AddToOccTable<<<g, b>>>(hash_doc_token_tables_device, occ_hash_table_device, numDocs, pitch2);
//cudaMemcpy
cudaMemcpy(occ_hash_table, occ_hash_table_device, occ_table_size * sizeof(MyHashMapElement),cudaMemcpyDeviceToHost);
}
__global__ void AddToOccTable(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int numDocs, size_t pitch2)
{
int maxCols = 32;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(col < maxCols && row < HASH_DOC_TOKEN_TABLE_SIZE/32)
{
for (int i = 0; i != numDocs; i++)
{
MyHashMapElement *hash_doc_token_table = (MyHashMapElement *)((char*)hash_doc_token_tables + i * pitch2);
MyHashMapElement *bucket = &hash_doc_token_table[(row * maxCols + col) * HASH_DOC_TOKEN_BUCKET_SIZE];
int numInBucket = bucket->countInBuc;
while (numInBucket--)
{
unsigned long key = bucket->key;
insertElementCuda(occ_hash_table, key, OCC_HASH_TABLE_SIZE_LOG2, OCC_HASH_TABLE_BUCKET_SIZE, bucket->tokenLength, 1);
bucket++;
}
}
}
}
__global__ void CalcTfIdf(CalcFreqController *controller, MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int docs_count, float *bucket_sqrt_sum, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(row < docs_count && col < maxCols)
{
int token_doc_count = controller[row].doc_token_count;
// 1. calculate the un-normalized tfidf
MyHashMapElement *bucket = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
bucket += col * HASH_DOC_TOKEN_BUCKET_SIZE;
int numInBucket = bucket->countInBuc;
__shared__ float bucketSqrtSum[HASH_DOC_TOKEN_TABLE_SIZE];
bucketSqrtSum[col] = 0.0f;
while (numInBucket--)
{
unsigned long key = bucket->key;
int occ = findElementCuda(occ_hash_table, key, OCC_HASH_TABLE_SIZE_LOG2, OCC_HASH_TABLE_BUCKET_SIZE, bucket->tokenLength);
if (occ != 0) // we should be able to find it in the occ table
{
float tf = (float)bucket->freq/token_doc_count;
float idf = log(float(docs_count)/occ);
bucket->tfidf = tf * idf;
bucketSqrtSum[col] += bucket->tfidf * bucket->tfidf;
dbg {
bucket->subkey = occ;
}
}
bucket++;
}
__syncthreads();
if(col == 0)
{
float sum = 0.0f;
for(int i = 0; i < maxCols; i++)
sum += bucketSqrtSum[i];
bucket_sqrt_sum[row] = sqrt(sum);
}
}
}
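// Illustrative reference for the weighting above (a sketch only, never called by the
// kernels): tf is the token's share of one document, idf down-weights tokens that occur
// in many documents, and CalcTfIdf2 later divides by the L2 norm that CalcTfIdf
// accumulates into bucket_sqrt_sum.
static __device__ float TfIdfReference(int freq, int doc_token_count, int docs_count, int occ)
{
	float tf = (float)freq / (float)doc_token_count;   // term frequency inside one document
	float idf = logf((float)docs_count / (float)occ);  // inverse document frequency
	return tf * idf;                                   // un-normalized weight
}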
__global__ void CalcTfIdf2(CalcFreqController *controller, MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table, int docs_count, float *bucket_sqrt_sum, size_t pitch2)
{
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
int col = threadIdx.x;//z
int row = blockIdx.x;//x
if(row < docs_count && col < maxCols)
{
// pthread_barrier_wait();
// normalize
float magnitude = bucket_sqrt_sum[row];
MyHashMapElement *bucket = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
bucket += col * HASH_DOC_TOKEN_BUCKET_SIZE;
int numInBucket = bucket->countInBuc;
while (numInBucket--)
{
float tfidf = (float)bucket->tfidf;
tfidf = tfidf / magnitude;
bucket->tfidf = tfidf;
bucket++;
}
}
}
void CalcTfidfWrapper(CalcFreqController *token_division_controller_host, MyHashMapElement **hash_doc_token_tables_host, MyHashMapElement *occ_hash_table_remote, int docs_count, float *bucket_sqrt_sum, int g, int b, int table_size)
{
	//cudaMalloc: one slot per document, since CalcTfIdf writes bucket_sqrt_sum[row] for each row < docs_count
	cudaMalloc(&bucket_sqrt_sum_device, docs_count * sizeof(float));
//kernel calls
CalcTfIdf<<<g, b>>>(token_division_controller_device, hash_doc_token_tables_device, occ_hash_table_device, docs_count, bucket_sqrt_sum_device, pitch2);
CalcTfIdf2<<<g, b>>>(token_division_controller_device, hash_doc_token_tables_device, occ_hash_table_device, docs_count, bucket_sqrt_sum_device, pitch2);
//cudaMemcpy
for(int j=0; j< docs_count;j++)
cudaMemcpy(hash_doc_token_tables_host[j], (MyHashMapElement *)((char*)hash_doc_token_tables_device + j * pitch2), table_size * sizeof(MyHashMapElement), cudaMemcpyDeviceToHost);
}
__global__ void CalcSimilarities(MyHashMapElement *hash_doc_token_tables, MyHashMapElement *occ_hash_table_remote, float *similarity_matrix, int docs_count, size_t pitch2)
{
int col = threadIdx.x;
int row = blockIdx.x;
int row2 = blockIdx.y;
int maxCols = HASH_DOC_TOKEN_TABLE_SIZE;
if(col < HASH_DOC_TOKEN_TABLE_SIZE && row < docs_count && row2 < docs_count)
{
MyHashMapElement *hashDoc_token_table1 = (MyHashMapElement *)((char *)hash_doc_token_tables + row * pitch2);
MyHashMapElement *hashDoc_token_table2 = (MyHashMapElement *)((char *)hash_doc_token_tables + row2 * pitch2);
__shared__ float sim_sum[HASH_DOC_TOKEN_TABLE_SIZE];
sim_sum[col] = 0.0f;
MyHashMapElement *bucket1 = hashDoc_token_table1 + col * HASH_DOC_TOKEN_BUCKET_SIZE;
int num_ele_1 = bucket1->countInBuc;
while (num_ele_1--)
{
MyHashMapElement *bucket2 = hashDoc_token_table2 + col * HASH_DOC_TOKEN_BUCKET_SIZE;
int num_ele_2 = bucket2->countInBuc;
int find = 0;
while (num_ele_2--)
{
if ((bucket2->key == bucket1->key) && (bucket2->tokenLength == bucket1->tokenLength))
{
find = 1;
break;
}
bucket2++;
}
if (find)
sim_sum[col] += bucket1->tfidf * bucket2->tfidf;
bucket1++;
}
__syncthreads();
if(col == 0)
{
float sum = 0.0f;
for(int i = 0; i < maxCols; i++)
sum += sim_sum[i];
similarity_matrix[docs_count * row + row2] = sum;
}
}
}
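// Because CalcTfIdf2 has already divided every weight by its document's L2 norm, the
// per-bucket products summed above form a dot product of two unit vectors, i.e. the
// cosine similarity of the two documents. Dense host-side equivalent, shown only as an
// illustrative sketch (hypothetical helper, not used by the kernels):
static float CosineOfNormalizedVectors(const float *a, const float *b, int n)
{
	float dot = 0.0f;
	for (int i = 0; i < n; i++)
		dot += a[i] * b[i];   // inputs assumed already L2-normalized
	return dot;
}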
void CalcSimilaritiesWrapper(MyHashMapElement **hash_doc_token_tables_host, MyHashMapElement *occ_hash_table_remote, float *doc_similarity_matrix_host, int docs_count, int g, int b)
{
//cudaMalloc
cudaMalloc(&doc_similarity_matrix_device, docs_count * docs_count * sizeof(float));
	// 2D grid so blockIdx.x / blockIdx.y index the two documents being compared; each block uses b threads
	dim3 numBlocks(g/2, g/2);
//kernel calls
CalcSimilarities<<<numBlocks, b>>>(hash_doc_token_tables_device, occ_hash_table_device, doc_similarity_matrix_device, docs_count, pitch2);
//cudaMemcpy
cudaMemcpy(doc_similarity_matrix_host, doc_similarity_matrix_device, docs_count * docs_count * sizeof(float),cudaMemcpyDeviceToHost);
}
__global__ void SortSimilarities2(float *similarity_matrix, int *rank_matrix, int docs_count, float *simbase, int *rankbase)
{
int col = threadIdx.x;
int row = blockIdx.x;
if(col < docs_count && row < docs_count)
{
simbase = (float *)similarity_matrix+row*docs_count;
rankbase = (int *)rank_matrix+row * docs_count;
float my_value = *((float *)simbase+col);
int myRank = 0;
for (int i = 0; i != docs_count; i++)
{
if (i == col)
continue;
if (*((float *)simbase+i) > my_value)
myRank = myRank + 1;
}
*((int *)rankbase+col) = myRank;
}
}
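// The ranking rule above counts how many other documents in the row have a strictly
// larger similarity, so the most similar document gets rank 0 and the diagonal entry
// (i == col) is skipped. Host-side equivalent, shown only as an illustrative sketch:
static int RankWithinRow(const float *row_vals, int docs_count, int col)
{
	int rank = 0;
	for (int i = 0; i < docs_count; i++)
		if (i != col && row_vals[i] > row_vals[col])
			rank++;
	return rank;
}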
void SortSimilarities2Wrapper(float *doc_similarity_matrix_host, int *doc_rank_matrix_host, int docs_count, int g, int b)
{
//cudaMalloc
cudaMalloc(&doc_rank_matrix_device, docs_count * docs_count * sizeof(int));
cudaMalloc(&simbase, docs_count*sizeof(float));
cudaMalloc(&rankbase, docs_count*sizeof(int));
//kernel call
SortSimilarities2<<<g, b>>>(doc_similarity_matrix_device, doc_rank_matrix_device, docs_count, simbase, rankbase);
//cudaMemcpy
cudaMemcpy(doc_rank_matrix_host, doc_rank_matrix_device, docs_count * docs_count * sizeof(int), cudaMemcpyDeviceToHost);
	//cudaFree (pass the device pointers themselves, not their addresses)
	cudaFree(doc_rank_matrix_device);
	cudaFree(doc_similarity_matrix_device);
	cudaFree(hash_doc_token_tables_device);
	cudaFree(occ_hash_table_device);
	cudaFree(token_division_controller_device);
	cudaFree(bucket_sqrt_sum_device);
	cudaFree(simbase);
	cudaFree(rankbase);
cudaFree(device_local);
cudaFree(token_length_device);
cudaFree(hash_doc_token_sub_tables_device);
}
|
9b4ed533aa9032b1999c0c8f662ee0c18de2950a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <stdio.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
  // deal with cases where the inverse elements are outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
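// Worked example for the weights above (illustrative only): with y = 2.25 and x = 3.5
// well inside the map, y_low = 2, x_low = 3, ly = 0.25, lx = 0.5, so
//   w1 = 0.75 * 0.5 = 0.375, w2 = 0.75 * 0.5 = 0.375,
//   w3 = 0.25 * 0.5 = 0.125, w4 = 0.25 * 0.5 = 0.125.
// The four weights always sum to 1, so the result is a convex combination of the four
// neighbouring input values.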
template <typename T>
__global__ void PSROIAlignForwardCUDA(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Do not use floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
out_sum += val;
}
}
out_sum /= count;
output[index] = out_sum;
channel_mapping[index] = c_in;
}
}
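// Position-sensitive mapping, worked example (illustrative only): with a 7x7 pooled
// grid, channels_out = channels / 49, and output channel c_out at bin (ph, pw) reads
// input channel c_in = (c_out * 7 + ph) * 7 + pw, i.e. every bin of the output grid has
// its own dedicated slice of input channels. Each bin's value is the average of
// roi_bin_grid_h * roi_bin_grid_w bilinear samples taken inside that bin.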
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
  // deal with cases where the inverse elements are outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void PSROIAlignBackwardCUDA(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
// Force too small ROIs to be 1x1
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
    // Do not use floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
const T grad_output_this_bin = grad_output[index];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(grad_input_offset + y_low * width + x_low, g1);
atomicAdd(grad_input_offset + y_low * width + x_high, g2);
atomicAdd(grad_input_offset + y_high * width + x_low, g3);
atomicAdd(grad_input_offset + y_high * width + x_high, g4);
} // if
} // ix
} // iy
}
}
std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIAlign_forward", [&] {
hipLaunchKernelGGL(( PSROIAlignForwardCUDA<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(hipGetLastError());
hipDeviceSynchronize();
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIAlign_backward", [&] {
hipLaunchKernelGGL(( PSROIAlignBackwardCUDA<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
|
9b4ed533aa9032b1999c0c8f662ee0c18de2950a.cu
|
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <stdio.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void PSROIAlignForwardCUDA(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Do not use floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
out_sum += val;
}
}
out_sum /= count;
output[index] = out_sum;
channel_mapping[index] = c_in;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void PSROIAlignBackwardCUDA(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
// Force too small ROIs to be 1x1
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
    // Do not use floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
const T grad_output_this_bin = grad_output[index];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(grad_input_offset + y_low * width + x_low, g1);
atomicAdd(grad_input_offset + y_low * width + x_high, g2);
atomicAdd(grad_input_offset + y_high * width + x_low, g3);
atomicAdd(grad_input_offset + y_high * width + x_high, g4);
} // if
} // ix
} // iy
}
}
std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIAlign_forward", [&] {
PSROIAlignForwardCUDA<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
cudaDeviceSynchronize();
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
at::cuda::ATenCeilDiv(
static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIAlign_backward", [&] {
PSROIAlignBackwardCUDA<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
f1053ffe311fd1470f0c35619316d366afbdea51.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// RoiAlignmentLayer written by Thanh-Toan Do
// Only a single value at each bin center is interpolated
// ------------------------------------------------------------------
#include <cfloat>
#include <stdio.h>
#include <math.h>
#include <float.h>
//#include "caffe/fast_rcnn_layers.hpp"
#include "caffe/roi_alignment_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
//__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
// const Dtype spatial_scale, const int channels, const int height,
// const int width, const int pooled_height, const int pooled_width,
// const Dtype* bottom_rois, Dtype* top_data, int* argmax_data)
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
int* argmax_data_topleft, int* argmax_data_topright,
int* argmax_data_bottomleft, int* argmax_data_bottomright,
float* dh_ratio_data, float* dw_ratio_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
// determine region of interest (roi) w.r.t conv5_3 size
float roi_start_w = (float)(bottom_rois[1] * spatial_scale); //spatial_scale = 1/16
float roi_start_h = (float)(bottom_rois[2] * spatial_scale);
float roi_end_w = (float)(bottom_rois[3] * spatial_scale);
float roi_end_h = (float)(bottom_rois[4] * spatial_scale);
// printf(" start w h end w e %f %f %f %f\n", roi_start_w, roi_start_h, roi_end_w, roi_end_h);
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = (roi_height) / ((float)(pooled_height));
float bin_size_w = (roi_width) / ((float)(pooled_width));
// printf("(roi_height, roi_width): %f %f (bin_size_h, bin_size_w): %f %f\n", roi_height, roi_width, bin_size_h, bin_size_w);
float hstart = ((float)(ph)) * bin_size_h;
float wstart = ((float)(pw)) * bin_size_w;
float hend = ((float)(ph + 1)) * bin_size_h;
float wend = ((float)(pw + 1)) * bin_size_w;
// Add roi offsets and clip to input boundaries
// hstart = fminf(fmaxf(hstart + roi_start_h, 0), height);
// hend = fminf(fmaxf(hend + roi_start_h, 0), height);
// wstart = fminf(fmaxf(wstart + roi_start_w, 0), width);
// wend = fminf(fmaxf(wend + roi_start_w, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
hstart = fminf(fmaxf(hstart + roi_start_h, 0), height-1);
hend = fminf(fmaxf(hend + roi_start_h, 0), height-1);
wstart = fminf(fmaxf(wstart + roi_start_w, 0), width-1);
wend = fminf(fmaxf(wend + roi_start_w, 0), width-1);
// printf("===========(hstart, wstar, hend, wend): %f %f %f %f\n",hstart, wstart, hend, wend);
bool is_empty = (hend < hstart) || (wend < wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// Dtype maxval = is_empty ? 0 : 0;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
/*
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval)
{
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
*/
if (is_empty)
{
// printf("====================invalid roi forward=====================\n");
top_data[index] = maxval;
// argmax_data[index] = maxidx;
argmax_data_topleft[index] = maxidx;
argmax_data_topright[index] = maxidx;
argmax_data_bottomleft[index] = maxidx;
argmax_data_bottomright[index] = maxidx;
dh_ratio_data[index] = 0.0;
dw_ratio_data[index] = 0.0;
}
else
{
// find the center of sub window (w.r.t. conv5_3 size)
// float centerx = (wstart + wend)/2.0;
// float centery = (hstart + hend)/2.0;
float centerx = (float)(wstart + wend)/2.0;
float centery = (float)(hstart + hend)/2.0;
// 4 nearest around the center
int cy_top = static_cast<int>(floor(centery));
int cy_bottom = static_cast<int>(ceil(centery));
int cx_left = static_cast<int>(floor(centerx));
int cx_right = static_cast<int>(ceil(centerx));
// cy_top = min(max(cy_top, 0), height);
// cy_bottom = min(max(cy_bottom, 0), height);
// cx_left = min(max(cx_left, 0), width);
// cx_right = min(max(cx_right, 0), width);
cy_top = min(max(cy_top, 0), height-1);
cy_bottom = min(max(cy_bottom, 0), height-1);
cx_left = min(max(cx_left, 0), width-1);
cx_right = min(max(cx_right, 0), width-1);
// find indexes of 4 nearest around the center
int topleft = cy_top * width + cx_left;
int topright = cy_top * width + cx_right;
int bottomleft = cy_bottom * width + cx_left;
int bottomright = cy_bottom * width + cx_right;
		// bilinearly interpolate the bin value using the 4 nearest neighbours
float y_ratio = centery - (float)(cy_top); // vertical distance to topleft
float x_ratio = centerx - (float)(cx_left); // horizontal distance to topleft
maxval = bottom_data[topleft] * (1-y_ratio) * (1-x_ratio)
+ bottom_data[topright] * (1-y_ratio) * (x_ratio)
+ bottom_data[bottomleft] * (y_ratio) * (1 - x_ratio)
+ bottom_data[bottomright] * (y_ratio) * (x_ratio);
// printf("(height, width): %d %d (hstart, hend, wstar, wend): %f %f %f %f (centery, centerx): %f %f "
// "(cy_top, cx_left): %d %d (cy_bottom, cx_right): %d %d (y_ratio, x_ratio): %f %f "
// "(topleft, topright, bottomleft, bottomright): %d %d %d %d\n",
// height, width, hstart, hend, wstart, wend, centery, centerx, cy_top, cx_left, cy_bottom, cx_right,
// y_ratio, x_ratio, topleft, topright, bottomleft, bottomright);
// maxval = bottom_data[topleft]
// + bottom_data[topright]
// + bottom_data[bottomleft]
// + bottom_data[bottomright]; //PASS
// maxval = bottom_data[topleft]; // PASS
// maxval = bottom_data[topright]; // PASS
// maxval = bottom_data[bottomleft]; //PASS
// maxval = bottom_data[bottomright]; //PASS
top_data[index] = maxval;
// printf("topleftdata: %f toprightdata: %f\n", float(bottom_data[topleft]), float(bottom_data[topright]));
// argmax_data[index] = maxidx;
argmax_data_topleft[index] = topleft;
argmax_data_topright[index] = topright;
argmax_data_bottomleft[index] = bottomleft;
argmax_data_bottomright[index] = bottomright;
dh_ratio_data[index] = y_ratio;
dw_ratio_data[index] = x_ratio;
}
}
}
template <typename Dtype>
void ROIAlignmentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// int* argmax_data = max_idx_.mutable_gpu_data();
/////////////////////////////////////////////////
int* argmax_data_topleft = max_idx_topleft.mutable_gpu_data();
int* argmax_data_topright = max_idx_topright.mutable_gpu_data();
int* argmax_data_bottomleft = max_idx_bottomleft.mutable_gpu_data();
int* argmax_data_bottomright = max_idx_bottomright.mutable_gpu_data();
float* dh_ratio_data = dh_ratio.mutable_gpu_data();
float* dw_ratio_data = dw_ratio.mutable_gpu_data();
/////////////////////////////////////////////////
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
// hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
// count, bottom_data, spatial_scale_, channels_, height_, width_,
// pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data,
argmax_data_topleft, argmax_data_topright,
argmax_data_bottomleft, argmax_data_bottomright,
dh_ratio_data, dw_ratio_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
//__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
// const int* argmax_data, const int num_rois, const Dtype spatial_scale,
// const int channels, const int height, const int width,
// const int pooled_height, const int pooled_width, Dtype* bottom_diff,
// const Dtype* bottom_rois)
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data_topleft, const int* argmax_data_topright,
const int* argmax_data_bottomleft, const int* argmax_data_bottomright,
const float* dh_ratio_data, const float* dw_ratio_data,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// printf("num_rois: %d spatial_scale: %f channels: %d (pooled_height, pooled_width): %d %d\n",
// num_rois, (float)spatial_scale, channels, pooled_height, pooled_width);
// (n, c, h, w) coords in bottom data (in conv5_3)
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0.;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n)
{
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind){
// printf("====================invalid roi by batch index doesn't match=====================\n");
continue;
}
float roi_start_w = (float)(offset_bottom_rois[1] * spatial_scale);
float roi_start_h = (float)(offset_bottom_rois[2] * spatial_scale);
float roi_end_w = (float)(offset_bottom_rois[3] * spatial_scale);
float roi_end_h = (float)(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
//const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h);
		const bool in_roi = (w >= roi_start_w-1 && w <= roi_end_w+1 && h >= roi_start_h-1 && h <= roi_end_h+1); // -1/+1 because a (h,w) outside the roi could have been used for interpolation
if (!in_roi) {
// printf("====================invalid roi by ROI doesn't include (h, w)=====================\n");
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// const int* offset_argmax_data = argmax_data + offset;
///////////////////////////////////////////////////////
const int* offset_argmax_data_topright = argmax_data_topright + offset;
const int* offset_argmax_data_topleft = argmax_data_topleft + offset;
const int* offset_argmax_data_bottomleft = argmax_data_bottomleft + offset;
const int* offset_argmax_data_bottomright = argmax_data_bottomright + offset;
const float* offset_dh_ratio_data = dh_ratio_data + offset;
const float* offset_dw_ratio_data = dw_ratio_data + offset;
///////////////////////////////////////////////////////
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = (roi_height) / ((float)(pooled_height));
float bin_size_w = (roi_width) / ((float)(pooled_width));
// printf("(roi_height, roi_width): %f %f (bin_size_h, bin_size_w): %f %f\n", roi_height, roi_width, bin_size_h, bin_size_w);
int phstart = floor(((float)h - roi_start_h) / bin_size_h);
int phend = ceil(((float)h - roi_start_h) / bin_size_h);
int pwstart = floor(((float)w - roi_start_w) / bin_size_w);
int pwend = ceil(((float)w - roi_start_w) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
// phstart = min(max(phstart, 0), pooled_height);
// phend = min(max(phend, 0), pooled_height);
// pwstart = min(max(pwstart, 0), pooled_width);
// pwend = min(max(pwend, 0), pooled_width);
phstart = 0;
phend = pooled_height;
pwstart = 0;
pwend = pooled_width;
for (int ph = phstart; ph < phend; ++ph)
{
for (int pw = pwstart; pw < pwend; ++pw)
{
/*
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w))
{
gradient += offset_top_diff[ph * pooled_width + pw];
}
*/
int topright = offset_argmax_data_topright[ph * pooled_width + pw];
int topleft = offset_argmax_data_topleft[ph * pooled_width + pw];
int bottomleft = offset_argmax_data_bottomleft[ph * pooled_width + pw];
int bottomright = offset_argmax_data_bottomright[ph * pooled_width + pw];
float y_ratio = offset_dh_ratio_data[ph * pooled_width + pw];
float x_ratio = offset_dw_ratio_data[ph * pooled_width + pw];
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
if (topleft == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw] * (1. - y_ratio)*(1. - x_ratio));
// gradient = 100; // --> gradient # 0
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (1. - y_ratio)*(1. - x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw]; // PASS
// printf("topleft: %d offset_top_diff: %f\n", topleft, (float)offset_top_diff[ph * pooled_width + pw]);
}
if (topright == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]*(1. -y_ratio)*(x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (1. -y_ratio)*(x_ratio * 1.);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (1. -y_ratio)*(x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
// printf("topright: %d offset_top_diff: %f\n", topright, (float)offset_top_diff[ph * pooled_width + pw]);
}
if (bottomleft == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]* (y_ratio) * (1. - x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (y_ratio * 1. ) * (1. - x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (y_ratio) * (1. - x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
}
if (bottomright == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]*(y_ratio) * (x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (y_ratio * 1.) * (1. * x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (y_ratio) * (x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
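// Backward intuition (illustrative note): the factors applied above are exactly the
// partial derivatives of the forward interpolation, e.g.
//   d(maxval) / d(bottom_data[topleft]) = (1 - y_ratio) * (1 - x_ratio),
// so each pooled cell returns its top_diff to the four neighbours it sampled from,
// scaled by the matching bilinear weight.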
template <typename Dtype>
void ROIAlignmentLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// const int* argmax_data = max_idx_.gpu_data();
/////////////////////////////////////////////////////////////////
const int* argmax_data_topleft = max_idx_topleft.gpu_data();
const int* argmax_data_topright = max_idx_topright.gpu_data();
const int* argmax_data_bottomleft = max_idx_bottomleft.gpu_data();
const int* argmax_data_bottomright = max_idx_bottomright.gpu_data();
const float* dh_ratio_data = dh_ratio.gpu_data();
const float* dw_ratio_data = dw_ratio.gpu_data();
////////////////////////////////////////////////////////////////
// NOLINT_NEXT_LINE(whitespace/operators)
// hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
// count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
// height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff,
argmax_data_topleft, argmax_data_topright,
argmax_data_bottomleft, argmax_data_bottomright,
dh_ratio_data, dw_ratio_data,
top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignmentLayer);
} // namespace caffe
|
f1053ffe311fd1470f0c35619316d366afbdea51.cu
|
// ------------------------------------------------------------------
// RoiAlignmentLayer written by Thanh-Toan Do
// Only a single value at each bin center is interpolated
// ------------------------------------------------------------------
#include <cfloat>
#include <stdio.h>
#include <math.h>
#include <float.h>
//#include "caffe/fast_rcnn_layers.hpp"
#include "caffe/roi_alignment_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
//__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
// const Dtype spatial_scale, const int channels, const int height,
// const int width, const int pooled_height, const int pooled_width,
// const Dtype* bottom_rois, Dtype* top_data, int* argmax_data)
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
int* argmax_data_topleft, int* argmax_data_topright,
int* argmax_data_bottomleft, int* argmax_data_bottomright,
float* dh_ratio_data, float* dw_ratio_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
// determine region of interest (roi) w.r.t conv5_3 size
float roi_start_w = (float)(bottom_rois[1] * spatial_scale); //spatial_scale = 1/16
float roi_start_h = (float)(bottom_rois[2] * spatial_scale);
float roi_end_w = (float)(bottom_rois[3] * spatial_scale);
float roi_end_h = (float)(bottom_rois[4] * spatial_scale);
// printf(" start w h end w e %f %f %f %f\n", roi_start_w, roi_start_h, roi_end_w, roi_end_h);
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = (roi_height) / ((float)(pooled_height));
float bin_size_w = (roi_width) / ((float)(pooled_width));
// printf("(roi_height, roi_width): %f %f (bin_size_h, bin_size_w): %f %f\n", roi_height, roi_width, bin_size_h, bin_size_w);
float hstart = ((float)(ph)) * bin_size_h;
float wstart = ((float)(pw)) * bin_size_w;
float hend = ((float)(ph + 1)) * bin_size_h;
float wend = ((float)(pw + 1)) * bin_size_w;
// Add roi offsets and clip to input boundaries
// hstart = fminf(fmaxf(hstart + roi_start_h, 0), height);
// hend = fminf(fmaxf(hend + roi_start_h, 0), height);
// wstart = fminf(fmaxf(wstart + roi_start_w, 0), width);
// wend = fminf(fmaxf(wend + roi_start_w, 0), width);
// bool is_empty = (hend <= hstart) || (wend <= wstart);
hstart = fminf(fmaxf(hstart + roi_start_h, 0), height-1);
hend = fminf(fmaxf(hend + roi_start_h, 0), height-1);
wstart = fminf(fmaxf(wstart + roi_start_w, 0), width-1);
wend = fminf(fmaxf(wend + roi_start_w, 0), width-1);
// printf("===========(hstart, wstar, hend, wend): %f %f %f %f\n",hstart, wstart, hend, wend);
bool is_empty = (hend < hstart) || (wend < wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// Dtype maxval = is_empty ? 0 : 0;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
/*
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval)
{
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
*/
if (is_empty)
{
// printf("====================invalid roi forward=====================\n");
top_data[index] = maxval;
// argmax_data[index] = maxidx;
argmax_data_topleft[index] = maxidx;
argmax_data_topright[index] = maxidx;
argmax_data_bottomleft[index] = maxidx;
argmax_data_bottomright[index] = maxidx;
dh_ratio_data[index] = 0.0;
dw_ratio_data[index] = 0.0;
}
else
{
// find the center of sub window (w.r.t. conv5_3 size)
// float centerx = (wstart + wend)/2.0;
// float centery = (hstart + hend)/2.0;
float centerx = (float)(wstart + wend)/2.0;
float centery = (float)(hstart + hend)/2.0;
// 4 nearest around the center
int cy_top = static_cast<int>(floor(centery));
int cy_bottom = static_cast<int>(ceil(centery));
int cx_left = static_cast<int>(floor(centerx));
int cx_right = static_cast<int>(ceil(centerx));
// cy_top = min(max(cy_top, 0), height);
// cy_bottom = min(max(cy_bottom, 0), height);
// cx_left = min(max(cx_left, 0), width);
// cx_right = min(max(cx_right, 0), width);
cy_top = min(max(cy_top, 0), height-1);
cy_bottom = min(max(cy_bottom, 0), height-1);
cx_left = min(max(cx_left, 0), width-1);
cx_right = min(max(cx_right, 0), width-1);
// find indexes of 4 nearest around the center
int topleft = cy_top * width + cx_left;
int topright = cy_top * width + cx_right;
int bottomleft = cy_bottom * width + cx_left;
int bottomright = cy_bottom * width + cx_right;
		// bilinearly interpolate the bin value using the 4 nearest neighbours
float y_ratio = centery - (float)(cy_top); // vertical distance to topleft
float x_ratio = centerx - (float)(cx_left); // horizontal distance to topleft
maxval = bottom_data[topleft] * (1-y_ratio) * (1-x_ratio)
+ bottom_data[topright] * (1-y_ratio) * (x_ratio)
+ bottom_data[bottomleft] * (y_ratio) * (1 - x_ratio)
+ bottom_data[bottomright] * (y_ratio) * (x_ratio);
// printf("(height, width): %d %d (hstart, hend, wstar, wend): %f %f %f %f (centery, centerx): %f %f "
// "(cy_top, cx_left): %d %d (cy_bottom, cx_right): %d %d (y_ratio, x_ratio): %f %f "
// "(topleft, topright, bottomleft, bottomright): %d %d %d %d\n",
// height, width, hstart, hend, wstart, wend, centery, centerx, cy_top, cx_left, cy_bottom, cx_right,
// y_ratio, x_ratio, topleft, topright, bottomleft, bottomright);
// maxval = bottom_data[topleft]
// + bottom_data[topright]
// + bottom_data[bottomleft]
// + bottom_data[bottomright]; //PASS
// maxval = bottom_data[topleft]; // PASS
// maxval = bottom_data[topright]; // PASS
// maxval = bottom_data[bottomleft]; //PASS
// maxval = bottom_data[bottomright]; //PASS
top_data[index] = maxval;
// printf("topleftdata: %f toprightdata: %f\n", float(bottom_data[topleft]), float(bottom_data[topright]));
// argmax_data[index] = maxidx;
argmax_data_topleft[index] = topleft;
argmax_data_topright[index] = topright;
argmax_data_bottomleft[index] = bottomleft;
argmax_data_bottomright[index] = bottomright;
dh_ratio_data[index] = y_ratio;
dw_ratio_data[index] = x_ratio;
}
}
}
template <typename Dtype>
void ROIAlignmentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// int* argmax_data = max_idx_.mutable_gpu_data();
/////////////////////////////////////////////////
int* argmax_data_topleft = max_idx_topleft.mutable_gpu_data();
int* argmax_data_topright = max_idx_topright.mutable_gpu_data();
int* argmax_data_bottomleft = max_idx_bottomleft.mutable_gpu_data();
int* argmax_data_bottomright = max_idx_bottomright.mutable_gpu_data();
float* dh_ratio_data = dh_ratio.mutable_gpu_data();
float* dw_ratio_data = dw_ratio.mutable_gpu_data();
/////////////////////////////////////////////////
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
// ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
// count, bottom_data, spatial_scale_, channels_, height_, width_,
// pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data,
argmax_data_topleft, argmax_data_topright,
argmax_data_bottomleft, argmax_data_bottomright,
dh_ratio_data, dw_ratio_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
//__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
// const int* argmax_data, const int num_rois, const Dtype spatial_scale,
// const int channels, const int height, const int width,
// const int pooled_height, const int pooled_width, Dtype* bottom_diff,
// const Dtype* bottom_rois)
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data_topleft, const int* argmax_data_topright,
const int* argmax_data_bottomleft, const int* argmax_data_bottomright,
const float* dh_ratio_data, const float* dw_ratio_data,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// printf("num_rois: %d spatial_scale: %f channels: %d (pooled_height, pooled_width): %d %d\n",
// num_rois, (float)spatial_scale, channels, pooled_height, pooled_width);
// (n, c, h, w) coords in bottom data (in conv5_3)
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0.;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n)
{
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind){
// printf("====================invalid roi by batch index doesn't match=====================\n");
continue;
}
float roi_start_w = (float)(offset_bottom_rois[1] * spatial_scale);
float roi_start_h = (float)(offset_bottom_rois[2] * spatial_scale);
float roi_end_w = (float)(offset_bottom_rois[3] * spatial_scale);
float roi_end_h = (float)(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
//const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h);
		const bool in_roi = (w >= roi_start_w-1 && w <= roi_end_w+1 && h >= roi_start_h-1 && h <= roi_end_h+1); // -1/+1 because a (h,w) outside the roi could have been used for interpolation
if (!in_roi) {
// printf("====================invalid roi by ROI doesn't include (h, w)=====================\n");
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// const int* offset_argmax_data = argmax_data + offset;
///////////////////////////////////////////////////////
const int* offset_argmax_data_topright = argmax_data_topright + offset;
const int* offset_argmax_data_topleft = argmax_data_topleft + offset;
const int* offset_argmax_data_bottomleft = argmax_data_bottomleft + offset;
const int* offset_argmax_data_bottomright = argmax_data_bottomright + offset;
const float* offset_dh_ratio_data = dh_ratio_data + offset;
const float* offset_dw_ratio_data = dw_ratio_data + offset;
///////////////////////////////////////////////////////
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = (roi_height) / ((float)(pooled_height));
float bin_size_w = (roi_width) / ((float)(pooled_width));
// printf("(roi_height, roi_width): %f %f (bin_size_h, bin_size_w): %f %f\n", roi_height, roi_width, bin_size_h, bin_size_w);
int phstart = floor(((float)h - roi_start_h) / bin_size_h);
int phend = ceil(((float)h - roi_start_h) / bin_size_h);
int pwstart = floor(((float)w - roi_start_w) / bin_size_w);
int pwend = ceil(((float)w - roi_start_w) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
// phstart = min(max(phstart, 0), pooled_height);
// phend = min(max(phend, 0), pooled_height);
// pwstart = min(max(pwstart, 0), pooled_width);
// pwend = min(max(pwend, 0), pooled_width);
phstart = 0;
phend = pooled_height;
pwstart = 0;
pwend = pooled_width;
for (int ph = phstart; ph < phend; ++ph)
{
for (int pw = pwstart; pw < pwend; ++pw)
{
/*
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w))
{
gradient += offset_top_diff[ph * pooled_width + pw];
}
*/
int topright = offset_argmax_data_topright[ph * pooled_width + pw];
int topleft = offset_argmax_data_topleft[ph * pooled_width + pw];
int bottomleft = offset_argmax_data_bottomleft[ph * pooled_width + pw];
int bottomright = offset_argmax_data_bottomright[ph * pooled_width + pw];
float y_ratio = offset_dh_ratio_data[ph * pooled_width + pw];
float x_ratio = offset_dw_ratio_data[ph * pooled_width + pw];
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
if (topleft == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw] * (1. - y_ratio)*(1. - x_ratio));
// gradient = 100; // --> gradient # 0
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (1. - y_ratio)*(1. - x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw]; // PASS
// printf("topleft: %d offset_top_diff: %f\n", topleft, (float)offset_top_diff[ph * pooled_width + pw]);
}
if (topright == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]*(1. -y_ratio)*(x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (1. -y_ratio)*(x_ratio * 1.);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (1. -y_ratio)*(x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
// printf("topright: %d offset_top_diff: %f\n", topright, (float)offset_top_diff[ph * pooled_width + pw]);
}
if (bottomleft == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]* (y_ratio) * (1. - x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (y_ratio * 1. ) * (1. - x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (y_ratio) * (1. - x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
}
if (bottomright == (h * width + w)){
gradient += (offset_top_diff[ph * pooled_width + pw]*(y_ratio) * (x_ratio));
// gradient += offset_top_diff[ph * pooled_width + pw]; // --> gradient # 0
// gradient = 100; // --> gradient # 0
// gradient += (x_ratio + y_ratio); // --> gradient # 0
// gradient += (y_ratio * 1.) * (1. * x_ratio);
// gradient += (float)offset_top_diff[ph * pooled_width + pw];
// gradient += (float)offset_top_diff[ph * pooled_width + pw] * (y_ratio) * (x_ratio);
// gradient += offset_top_diff[ph * pooled_width + pw] * 0.5;
// gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
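// Illustrative sketch (not part of the original layer): the backward kernel above
// splits each pooled gradient top_diff[ph][pw] over the four sampled corners using
// the stored (y_ratio, x_ratio). The weights are the standard bilinear coefficients
// and always sum to 1, so the gradient mass is preserved. The helper name below is
// hypothetical and only added for clarity.
static inline void bilinear_backward_weights_sketch(float y_ratio, float x_ratio,
                                                    float w[4]) {
  w[0] = (1.f - y_ratio) * (1.f - x_ratio);  // top-left share
  w[1] = (1.f - y_ratio) * x_ratio;          // top-right share
  w[2] = y_ratio * (1.f - x_ratio);          // bottom-left share
  w[3] = y_ratio * x_ratio;                  // bottom-right share
  // w[0] + w[1] + w[2] + w[3] == 1 for any ratios in [0, 1].
}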
template <typename Dtype>
void ROIAlignmentLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// const int* argmax_data = max_idx_.gpu_data();
/////////////////////////////////////////////////////////////////
const int* argmax_data_topleft = max_idx_topleft.gpu_data();
const int* argmax_data_topright = max_idx_topright.gpu_data();
const int* argmax_data_bottomleft = max_idx_bottomleft.gpu_data();
const int* argmax_data_bottomright = max_idx_bottomright.gpu_data();
const float* dh_ratio_data = dh_ratio.gpu_data();
const float* dw_ratio_data = dw_ratio.gpu_data();
////////////////////////////////////////////////////////////////
// NOLINT_NEXT_LINE(whitespace/operators)
// ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
// count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
// height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff,
argmax_data_topleft, argmax_data_topright,
argmax_data_bottomleft, argmax_data_bottomright,
dh_ratio_data, dw_ratio_data,
top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignmentLayer);
} // namespace caffe
|
8a50726f4372be85d9bc3e3281ebea4429d177cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <NvInfer.h>
#include <opencv2/opencv.hpp>
#include <chrono>
#include <ros/ros.h>
#include <utils.h>
/*
TODO: remove the hard-coded output size of 4
TODO: improve the inference accuracy
*/
using namespace nvinfer1;
class Logger : public ILogger {
void log(Severity severity, const char * msg) override {
if (severity != Severity::kINFO)
ROS_INFO("[[infer.cu]] %s", msg);
}
} gLogger;
// runtimes
IRuntime *runtime;
ICudaEngine *engine;
IExecutionContext *context;
int inputBindingIndex, outputBindingIndex;
int inputHeight, inputWidth;
Dims inputDims, outputDims;
bool is_initialized = false;
void *bindings[2];
// flags
bool use_mappedMemory;
// pointers
size_t numInput, numOutput;
float *inputDataHost, *outputDataHost;
float *inputDataDevice, *outputDataDevice;
void setup(std::string planFilename, std::string inputName, std::string outputName, bool _use_mappedMemory) {
ROS_INFO("setup");
std::ifstream planFile(planFilename.c_str());
if(!planFile.is_open()) {
ROS_INFO("cannot get plan file");
is_initialized = false;
} else {
std::stringstream planBuffer;
planBuffer << planFile.rdbuf();
std::string plan = planBuffer.str();
use_mappedMemory = _use_mappedMemory;
runtime = createInferRuntime(gLogger);
engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr);
context = engine->createExecutionContext();
ROS_INFO("load setup finished");
inputBindingIndex = engine->getBindingIndex(inputName.c_str());
outputBindingIndex = engine->getBindingIndex(outputName.c_str());
inputDims = engine->getBindingDimensions(inputBindingIndex);
outputDims = engine->getBindingDimensions(outputBindingIndex);
inputHeight = inputDims.d[1];
inputWidth = inputDims.d[2];
ROS_INFO("input: h=%d, w=%d", inputHeight, inputWidth);
numInput = numTensorElements(inputDims);
numOutput = numTensorElements(outputDims);
if (use_mappedMemory) {
// host
hipHostMalloc(&inputDataHost, numInput * sizeof(float), hipHostMallocMapped);
hipHostMalloc(&outputDataHost, numOutput * sizeof(float), hipHostMallocMapped);
// device
hipHostGetDevicePointer(&inputDataDevice, inputDataHost, 0);
hipHostGetDevicePointer(&outputDataDevice, outputDataHost, 0);
} else {
// host
inputDataHost = (float*) malloc(numInput * sizeof(float));
outputDataHost = (float*) malloc(numOutput * sizeof(float));
// device
hipMalloc(&inputDataDevice, numInput * sizeof(float));
hipMalloc(&outputDataDevice, numOutput * sizeof(float));
}
bindings[inputBindingIndex] = (void*)inputDataDevice;
bindings[outputBindingIndex] = (void*)outputDataDevice;
is_initialized = true;
ROS_INFO("initialize finished %d, %d", numInput, numOutput);
}
}
void destroy(void) {
if(is_initialized) {
runtime->destroy();
engine->destroy();
context->destroy();
if(use_mappedMemory) {
hipHostFree(inputDataHost);
hipHostFree(outputDataHost);
} else {
free(inputDataHost);
free(outputDataHost);
}
hipFree(inputDataDevice);
hipFree(outputDataDevice);
}
is_initialized = false;
}
void infer(cv::Mat image, float* out) {
// run the CNN on an OpenCV image
ROS_INFO("get");
// preprocessing
cv::resize(image, image, cv::Size(inputWidth, inputHeight));
cvImageToTensor(image, inputDataHost, inputDims);
preprocessVgg(inputDataHost, inputDims);
// execute on cuda
if (use_mappedMemory) {
context->execute(1, bindings);
} else {
hipMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), hipMemcpyHostToDevice);
context->execute(1, bindings);
hipMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), hipMemcpyDeviceToHost);
}
// output
/* ROS_INFO("%f %f %f %f", outputDataHost[0], outputDataHost[1], outputDataHost[2], outputDataHost[3]); */
for (int i = 0; i < 4; i++) {
out[i] = outputDataHost[i];
}
}
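// Minimal zero-copy sketch (illustration only; assumes the device supports mapped
// host memory): with hipHostMallocMapped the same allocation is visible from both
// host and device, which is why the use_mappedMemory branch in infer() above needs
// no explicit hipMemcpy. The function name is hypothetical and not part of this node.
static void mappedMemoryExampleSketch(size_t n) {
  float *hostPtr = nullptr, *devicePtr = nullptr;
  hipHostMalloc((void**)&hostPtr, n * sizeof(float), hipHostMallocMapped);
  hipHostGetDevicePointer((void**)&devicePtr, hostPtr, 0);
  // fill hostPtr on the CPU, pass devicePtr to kernels / TensorRT bindings,
  // then read results back through hostPtr after synchronizing.
  hipHostFree(hostPtr);
}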
void test(void) {
ROS_INFO("inside cu");
hipDeviceSynchronize();
}
|
8a50726f4372be85d9bc3e3281ebea4429d177cb.cu
|
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <NvInfer.h>
#include <opencv2/opencv.hpp>
#include <chrono>
#include <ros/ros.h>
#include <utils.h>
/*
TODO: remove the hard-coded output size of 4
TODO: improve the inference accuracy
*/
using namespace nvinfer1;
class Logger : public ILogger {
void log(Severity severity, const char * msg) override {
if (severity != Severity::kINFO)
ROS_INFO("[[infer.cu]] %s", msg);
}
} gLogger;
// runtimes
IRuntime *runtime;
ICudaEngine *engine;
IExecutionContext *context;
int inputBindingIndex, outputBindingIndex;
int inputHeight, inputWidth;
Dims inputDims, outputDims;
bool is_initialized = false;
void *bindings[2];
// flags
bool use_mappedMemory;
// pointers
size_t numInput, numOutput;
float *inputDataHost, *outputDataHost;
float *inputDataDevice, *outputDataDevice;
void setup(std::string planFilename, std::string inputName, std::string outputName, bool _use_mappedMemory) {
ROS_INFO("setup");
std::ifstream planFile(planFilename.c_str());
if(!planFile.is_open()) {
ROS_INFO("cannot get plan file");
is_initialized = false;
} else {
std::stringstream planBuffer;
planBuffer << planFile.rdbuf();
std::string plan = planBuffer.str();
use_mappedMemory = _use_mappedMemory;
runtime = createInferRuntime(gLogger);
engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr);
context = engine->createExecutionContext();
ROS_INFO("load setup finished");
inputBindingIndex = engine->getBindingIndex(inputName.c_str());
outputBindingIndex = engine->getBindingIndex(outputName.c_str());
inputDims = engine->getBindingDimensions(inputBindingIndex);
outputDims = engine->getBindingDimensions(outputBindingIndex);
inputHeight = inputDims.d[1];
inputWidth = inputDims.d[2];
ROS_INFO("input: h=%d, w=%d", inputHeight, inputWidth);
numInput = numTensorElements(inputDims);
numOutput = numTensorElements(outputDims);
if (use_mappedMemory) {
// host
cudaHostAlloc(&inputDataHost, numInput * sizeof(float), cudaHostAllocMapped);
cudaHostAlloc(&outputDataHost, numOutput * sizeof(float), cudaHostAllocMapped);
// device
cudaHostGetDevicePointer(&inputDataDevice, inputDataHost, 0);
cudaHostGetDevicePointer(&outputDataDevice, outputDataHost, 0);
} else {
// host
inputDataHost = (float*) malloc(numInput * sizeof(float));
outputDataHost = (float*) malloc(numOutput * sizeof(float));
// device
cudaMalloc(&inputDataDevice, numInput * sizeof(float));
cudaMalloc(&outputDataDevice, numOutput * sizeof(float));
}
bindings[inputBindingIndex] = (void*)inputDataDevice;
bindings[outputBindingIndex] = (void*)outputDataDevice;
is_initialized = true;
ROS_INFO("initialize finished %d, %d", numInput, numOutput);
}
}
void destroy(void) {
if(is_initialized) {
runtime->destroy();
engine->destroy();
context->destroy();
if(use_mappedMemory) {
cudaFreeHost(inputDataHost);
cudaFreeHost(outputDataHost);
} else {
free(inputDataHost);
free(outputDataHost);
}
cudaFree(inputDataDevice);
cudaFree(outputDataDevice);
}
is_initialized = false;
}
void infer(cv::Mat image, float* out) {
// run the CNN on an OpenCV image
ROS_INFO("get");
// preprocessing
cv::resize(image, image, cv::Size(inputWidth, inputHeight));
cvImageToTensor(image, inputDataHost, inputDims);
preprocessVgg(inputDataHost, inputDims);
// execute on cuda
if (use_mappedMemory) {
context->execute(1, bindings);
} else {
cudaMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), cudaMemcpyHostToDevice);
context->execute(1, bindings);
cudaMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), cudaMemcpyDeviceToHost);
}
// output
/* ROS_INFO("%f %f %f %f", outputDataHost[0], outputDataHost[1], outputDataHost[2], outputDataHost[3]); */
for (int i = 0; i < 4; i++) {
out[i] = outputDataHost[i];
}
}
void test(void) {
ROS_INFO("inside cu");
cudaDeviceSynchronize();
}
|
78d7319fb88635fcbb298530ae5e28e5c221c0da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* This is a generic evaluator template. This file should be manually edited
* and adapted to the problem at hand
*
* @author Mehran Maghoumi
*
*/
/** =====================================Stack related definitions==================================== */
/** The size of the interpreter stack */
#define STACK_SIZE 128
#define push(A) do { sp++;stack[sp]=A; if(sp >= STACK_SIZE) printf("Stack overflow");} while(false)
#define pop(A) do{ A=stack[sp];sp--; if(sp < -1) printf("Stack underflow");} while(false)
/** ================================================================================================== */
/** The number of training instances that the individual is to be evaluated for */
#define PROBLEM_SIZE 1024
#define BLOCK_SIZE 512 // Used for the shared memory definitions
/************************************************************************************************************
************************************************************************************************************/
//TODO DOC: sadly there is only support for 1 pitch value for all input instances (which should be more than enough)
extern "C"
__global__ void evaluate(double* x,double* y, int inputPitch,
double* output, int outputPitch,
const char* __restrict__ individuals, const int indCounts, const int maxLength)
{
int blockIndex = blockIdx.x;
int threadIndex = threadIdx.x;
if (blockIndex >= indCounts)
return;
// Obtain pointer to the beginning of the memory space of the individual that
// this block will evaluate
const char* __restrict__ expression = &(individuals[blockIndex * maxLength]);
double* blockOutput = &(output[blockIndex * outputPitch]);
// the first thread should reset these values
// if (threadIndex == 0) {
// fitnesses[blockIndex] = 0;
// }
double stack[STACK_SIZE]; // The stack is defined as the same type as the kernel output
int sp;
// Determine how many fitness cases this thread should process
int portion = (PROBLEM_SIZE - 1)/ blockDim.x + 1;
for (int i = 0 ; i < portion; i++) {
// Thread to data index mapping with respect to the loop variable
int tid = portion * threadIndex + i;
if (tid >= PROBLEM_SIZE)
break;
// Reset the stack pointer
sp = - 1;
int k = 0; // Maintains the current index in the expression
while(expression[k] != 0)
{
switch(expression[k])
{
case 1: {push(x[tid]);
}break;
case 2: {push(y[tid]);
}break;
case 3: {double second;pop(second);
double first;pop(first);
double final = first + second;
push(final);
}break;
case 4: {double second;pop(second);
double first;pop(first);
double final = first - second;
push(final);
}break;
case 5: {double second;pop(second);
double first;pop(first);
double final = first * second;
push(final);
}break;
default:printf("Unrecognized OPCODE in the expression tree!");break;
}
k++;
}
// Pop the top of the stack
double stackTop;
pop(stackTop);
if(sp!=-1)
printf("Stack pointer is not -1 but is %d", sp);
// Assign the top of the stack to the output
blockOutput[tid] = stackTop;
}
}
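// Worked example (sketch, not part of the generated template): the interpreter above
// walks a postfix opcode string where 0 terminates, 1 pushes x[tid], 2 pushes y[tid],
// and 3/4/5 pop two operands for +, -, *. The expression (x + y) * x is therefore
// encoded as {1, 2, 3, 1, 5, 0}: push x, push y, add, push x, multiply, stop, leaving
// one value on the stack that becomes blockOutput[tid]. The host mirror below only
// illustrates that evaluation order.
static double evaluateOnHostSketch(const char* expr, double x, double y) {
    double stk[STACK_SIZE]; int top = -1;
    for (int k = 0; expr[k] != 0; ++k) {
        switch (expr[k]) {
            case 1: stk[++top] = x; break;
            case 2: stk[++top] = y; break;
            case 3: { double b = stk[top--], a = stk[top--]; stk[++top] = a + b; } break;
            case 4: { double b = stk[top--], a = stk[top--]; stk[++top] = a - b; } break;
            case 5: { double b = stk[top--], a = stk[top--]; stk[++top] = a * b; } break;
        }
    }
    return stk[top]; // same value the kernel writes for this fitness case
}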
|
78d7319fb88635fcbb298530ae5e28e5c221c0da.cu
|
/**
* This is a generic evaluator template. This file should be manually edited
* and adapted to the problem at hand
*
* @author Mehran Maghoumi
*
*/
/** =====================================Stack related definitions==================================== */
/** The size of the interpreter stack */
#define STACK_SIZE 128
#define push(A) do { sp++;stack[sp]=A; if(sp >= STACK_SIZE) printf("Stack overflow");} while(false)
#define pop(A) do{ A=stack[sp];sp--; if(sp < -1) printf("Stack underflow");} while(false)
/** ================================================================================================== */
/** The number of training instances that the individual is to be evaluated for */
#define PROBLEM_SIZE 1024
#define BLOCK_SIZE 512 // Used for the shared memory definitions
/************************************************************************************************************
************************************************************************************************************/
//TODO DOC: sadly there is only support for 1 pitch value for all input instances (which should be more than enough)
extern "C"
__global__ void evaluate(double* x,double* y, int inputPitch,
double* output, int outputPitch,
const char* __restrict__ individuals, const int indCounts, const int maxLength)
{
int blockIndex = blockIdx.x;
int threadIndex = threadIdx.x;
if (blockIndex >= indCounts)
return;
// Obtain pointer to the beginning of the memory space of the individual that
// this block will evaluate
const char* __restrict__ expression = &(individuals[blockIndex * maxLength]);
double* blockOutput = &(output[blockIndex * outputPitch]);
// the first thread should reset these values
// if (threadIndex == 0) {
// fitnesses[blockIndex] = 0;
// }
double stack[STACK_SIZE]; // The stack is defined as the same type as the kernel output
int sp;
// Determine how many fitness cases this thread should process
int portion = (PROBLEM_SIZE - 1)/ blockDim.x + 1;
for (int i = 0 ; i < portion; i++) {
// Thread to data index mapping with respect to the loop variable
int tid = portion * threadIndex + i;
if (tid >= PROBLEM_SIZE)
break;
// Reset the stack pointer
sp = - 1;
int k = 0; // Maintains the current index in the expression
while(expression[k] != 0)
{
switch(expression[k])
{
case 1: {push(x[tid]);
}break;
case 2: {push(y[tid]);
}break;
case 3: {double second;pop(second);
double first;pop(first);
double final = first + second;
push(final);
}break;
case 4: {double second;pop(second);
double first;pop(first);
double final = first - second;
push(final);
}break;
case 5: {double second;pop(second);
double first;pop(first);
double final = first * second;
push(final);
}break;
default:printf("Unrecognized OPCODE in the expression tree!");break;
}
k++;
}
// Pop the top of the stack
double stackTop;
pop(stackTop);
if(sp!=-1)
printf("Stack pointer is not -1 but is %d", sp);
// Assign the top of the stack to the output
blockOutput[tid] = stackTop;
}
}
|
e4fdbc5d72ceb11b234dc28aeb04a4e51eb77bb8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Cuda4RNN.h"
void trainMultiThread(
float* lossAllVec,
Para* para
)
{
int num_sces = para->h_num_sces[0];
int total_epoches = para->h_total_epoches[0];
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
float score_min = para->h_score_min[0];
float score_max = para->h_score_max[0];
{
cout << "train begins" << endl << "alpha: " << alpha << endl;
cout << "total_epoches: " << total_epoches << endl
<< "n_features: " << n_features << endl;
cout << "n_hidden: " << n_hidden << endl
<< "n_output_classes: " << n_output_classes << endl;
cout << "score_min: " << score_min << endl
<< "score_max: " << score_max << endl;
}
const int n_streams = 10;
hipStream_t stream[n_streams];
for (int i = 0; i < n_streams; i++)
{
hipStreamCreate(&stream[i]);
}
hipblasHandle_t handle;
hipblasCreate(&handle);
// use plain SGD first, with a single CPU thread
float* loss_one_epoch;
float* loss_mean_each_epoch;
float* true_false;
float* accuracy_each_epoch;
float loss = 0.f;
hipMalloc((void**)&loss_one_epoch, total_epoches * sizeof(float));
hipMalloc((void**)&loss_mean_each_epoch, total_epoches * sizeof(float));
hipMalloc((void**)&true_false, total_epoches * sizeof(float));
hipMalloc((void**)&accuracy_each_epoch, total_epoches * sizeof(float));
int total_size = para->h_total_size[0];
int Nmax = para->d_Nmax[0];
int M = para->h_sces_data_mn[0];
float* sce_item_data; // allocated with the same size as the scenario data
hipMalloc((void**)&sce_item_data, total_size*sizeof(float));
float* hprev;
hipMalloc((void**)&hprev, n_hidden * sizeof(float));
// total_epoches
for (int i = 0; i < 21; i++)
{
/**
loop over each scenario
*/
for(int item = 0; item < num_sces; item++)
{
// ---------- take one scenario's data and train the RNN ------------
float id0 = para->h_sces_id_score[item*2 + 0];
float score0 = para->h_sces_id_score[item*2 + 1];
int sce0_M = para->h_sces_data_mn[item * 2 + 0];
int sce0_N = para->h_sces_data_mn[item * 2 + 1];
int beginIdx = para->h_sces_data_idx_begin[item];
int endIdx = para->h_sces_data_idx_begin[item + 1];
//gpu_copy(sce_item_data, 0, para->d_sces_data, beginIdx, endIdx);
sce_item_data = para->d_sces_data + beginIdx;
// shallow copy: just alias the pointer into the scenario data
//gpu_clear_arr(true_false, total_epoches);//true_false.clear();
//gpu_clear_arr(loss_one_epoch, total_epoches);//loss_one_epoch.clear();
// set dP 0
gpu_fill(para->d_dWxh, n_hidden * n_features, 0.f);
gpu_fill(para->d_dWhh, n_hidden * n_hidden, 0.f);
gpu_fill(para->d_dWhy, n_hidden * n_output_classes, 0.f);
gpu_fill(para->d_dbh, n_hidden, 0.f);
gpu_fill(para->d_dby, n_output_classes, 0.f);
gpu_fill(hprev, n_hidden, 0.f);
lossFun(handle,
sce_item_data, sce0_M, sce0_N,
score0,
hprev, true_false,
loss,
para,
stream);
if (i % 10 == 0)
{
cout << "epoch: " << i << ", loss: " << loss << endl;
}
sgd(handle, para);
}
}
// lossVec mean, accu
// free resource
;{
deInitPara(para);
hipFree(hprev);
hipFree(sce_item_data);
hipFree(loss_one_epoch);
hipFree(loss_mean_each_epoch);
hipFree(true_false);
hipFree(accuracy_each_epoch);
cout << "free over in train fn \n";
}
hipblasDestroy(handle);
for (int i = 0; i < n_streams; i++)
{
hipStreamDestroy(stream[i]);
}
}
/*
arma::mat in RNN-v2 => device_vector.
inputs: data of a scenario
M: n_rows of orig. inputs. Currently M = 17, i.e. the number of signals
N: n_cols of orig. inputs. N is the number of rows of matDataZScore in MATLAB, i.e. the number of time steps
Note: the parameters include a struct; before calling this fn, allocate the struct with hipMallocManaged
*/
void lossFun(
hipblasHandle_t handle,
float* inputs, int M, int N,
float score,
float* hprev,
float* true_false,
float& loss,
Para* para,
hipStream_t stream[]
)
{
int total_epoches = para->h_total_epoches[0];
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
float score_min = para->h_score_min[0];
float score_max = para->h_score_max[0];
int Nmax = para->d_Nmax[0]; // the largest N over all scenarios
int idx1_targets = -1;
score2onehot(score,
idx1_targets, n_output_classes, score_min, score_max);
// hs[-1] = hprev;
gpu_set_col(para->d_hs, n_hidden, Nmax, -1+1, hprev);
loss = 0.f;
// ---------------- forward pass -------------
for (int t = 0; t < N; t++)
{
// ----- xs[t] = inputs.row(t).t(); ----- xs just holds the inputs data, so no extra copy is needed
//gpu_get_col(inputs, M, N, t, para->d_tmp_d_vec); // tmp saves xs[t]
//gpu_set_col(para->d_xs, n_features, Nmax, t, para->d_tmp_d_vec);
// --- hs[t] = arma::tanh(Wxh * xs[t] + Whh*hs[t-1] + bh); ----
// Wxh * xs[t]
// hs[t-1]
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t - 1 + 1,
para->d_tmp_d_vec2);*/ // tmp2 saves hs[t-1]
// Whh * hs[t-1]
/*gpu_mv(handle, para->Whh, para->d_tmp_d_vec, n_hidden, n_hidden,
para->d_W_tmp2);*/ // W_tmp2 saves Whh*hs[t-1]
/*gpu_tanh_add_add(para->d_W_tmp1, para->d_W_tmp2, para->bh, n_hidden,
para->d_tmp_d_vec);*/ // tmp saves tanh_add_add
gpu_tanh_Mv_add_Mv_add_v(handle,
para->d_Wxh, n_hidden, n_features,
inputs + t*M, // xs[t]
para->d_Whh, n_hidden, n_hidden,
para->d_hs + n_hidden*(t-1+1), // hs[t-1]
para->d_bh,
para->d_hs + n_hidden*(t+1), // dest => hs[t]
para,
stream);
//gpu_set_col(para->d_hs, n_hidden, Nmax, t + 1, para->d_W_tmp3);
if (t == N-1)
{
// ys[t] = Why * hs[t] + by;
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t]
gpu_mv(handle, para->d_Why,
para->d_hs + n_hidden*(t+1), // hs[t]
n_output_classes, n_hidden,
para->d_W_tmp1); // Why * hs[t]
gpu_add(para->d_W_tmp1, para->d_by, n_output_classes,
para->d_ys + n_output_classes*t, stream[0]); // tmp saves ys[t]
hipStreamSynchronize(stream[0]);
//gpu_set_col(para->d_ys, n_output_classes, Nmax, t, para->d_tmp_d_vec);
// ps[t] = softmax(ys[t])
int sum1 = n_features + n_features + n_output_classes;
gpu_clear_arr(para->d_W_tmp1, sum1*sum1);
gpu_get_col(para->d_ys, n_output_classes, Nmax, t, para->d_tmp_d_vec);
gpu_softmax(para->d_tmp_d_vec, n_output_classes,
para->d_W_tmp1, // dest => ps[t]
para->d_W_tmp2); // cache
gpu_set_col(para->d_ps, n_output_classes, Nmax, t,
para->d_W_tmp1); // d_W_tmp1 = softmax = ps[t]
// loss += -log(ps[t](idx1));
/*float val = para->d_W_tmp1[idx1_targets];
loss += -logf(val);*/
hipMemcpy(para->h_cache, para->d_W_tmp1, sum1*sum1 * sizeof(float), hipMemcpyDeviceToHost);
float val = para->h_cache[idx1_targets];
loss = -logf(val);
// idx_pred
gpu_max_index(para->d_W_tmp1, sum1*sum1, para->d_W_tmp2);
}
}
// ---------------- BPTT -------------
gpu_fill(para->d_dWxh, n_hidden*n_features, 0.f);
gpu_fill(para->d_dWhh, n_hidden*n_hidden, 0.f);
gpu_fill(para->d_dWhy, n_hidden*n_output_classes, 0.f);
gpu_fill(para->d_dbh, n_hidden, 0.f);
gpu_fill(para->d_dby, n_output_classes, 0.f);
gpu_fill(para->d_dhnext, n_hidden, 0.f); // init dhnext = 0
for (int t = N-1; t >= 0; t--)
{
if (t == N-1)
{
// dy = ps[t];
gpu_get_col(para->d_ps, n_output_classes, Nmax, t, para->d_dy);
// uvec fuvec = arma::find(targets == 1);
// dy[fuvec(0)] -= 1;
// ??? para->d_dy[idx1_targets] -= 1.f;
gpu_update_dy(para->d_dy, n_output_classes, idx1_targets);
// dWhy += dy * hs[t].t(); /// dy(10,1) * hs[t].t()(1,50) = (10,50)
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t]'
gpu_mmul(handle, para->d_dy,
para->d_hs+n_hidden*(t+1), // hs[t]'
n_output_classes, 1, n_hidden,
para->d_W_tmp1, stream[0]); // Wtmp1 saves dy*hs[t]'
gpu_add(para->d_dWhy, para->d_W_tmp1, n_output_classes*n_hidden,
para->d_dWhy, stream[0]);
// dby += dy;
gpu_add(para->d_dby, para->d_dy, n_output_classes, para->d_dby, stream[1]);
// dh = Why.t() * dy + dhnext;
gpu_mv(handle, para->d_Why, para->d_dy, n_output_classes, n_hidden,
para->d_W_tmp1, true); // Wtmp1 saves Why' * dy
gpu_add(para->d_W_tmp1, para->d_dhnext, n_hidden, para->d_dh, stream[2]);
hipStreamSynchronize(stream[2]);
// dhraw = (1 - hs[t] % hs[t]) % dh; // mul elemwise
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec); */// tmp saves hs[t]
gpu_tanh_der_hs_dh(para->d_hs + n_hidden*(t+1), // hs[t]
para->d_dh, n_hidden,
para->d_dhraw);
// dbh += dhraw;
gpu_add(para->d_dbh, para->d_dhraw, n_hidden, para->d_dbh, stream[3]);
// dWxh += dhraw * xs[t].t(); // penalty term, only needs to be added once in the loop
/*gpu_get_col(para->d_xs, n_features, Nmax, t,
para->d_tmp_d_vec);*/ // tmp saves xs[t]
gpu_mmul(handle, para->d_dhraw,
inputs + t*M, // xs[t]
n_hidden, 1, n_features,
para->d_W_tmp1, stream[4]); // Wtmp1 saves dhraw*xs[t]'
gpu_add(para->d_dWxh, para->d_W_tmp1, n_hidden*n_features,
para->d_dWxh, stream[4]);
// dWhh += dhraw * hs[t - 1].t();
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t-1+1,
para->d_tmp_d_vec);*/ // tmp saves hs[t-1]
gpu_mmul(handle, para->d_dhraw,
para->d_hs + n_hidden * (t-1+1), // hs[t-1]
n_hidden, 1, n_hidden,
para->d_W_tmp1, stream[5]); // Wtmp1 saves dhraw*hs[t-1]'
gpu_add(para->d_dWhh, para->d_W_tmp1, n_hidden*n_hidden,
para->d_dWhh, stream[5]);
// dhnext = Whh.t() * dhraw;
gpu_mv(handle, para->d_Whh, para->d_dhraw, n_hidden, n_hidden,
para->d_dhnext, true);
for (int i = 0; i <= 5; i++)
{
hipStreamSynchronize(stream[i]);
}
}
else
{
// dh = dhnext;
para->d_dh = para->d_dhnext;
// dhraw = (1 - hs[t] % hs[t]) % dh; // mul elemwise
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec); */// tmp saves hs[t]
gpu_tanh_der_hs_dh(para->d_hs + n_hidden*(t+1), // hs[t]
para->d_dh, n_hidden,
para->d_dhraw);
// can be computed in parallel: dbh, dWxh, dWhh, dhnext
// dbh += dhraw;
gpu_add(para->d_dbh, para->d_dhraw, n_hidden, para->d_dbh, stream[0]);
// dWxh += dhraw * xs[t].t(); // penalty term, only needs to be added once in the loop
/*gpu_get_col(para->d_xs, n_features, Nmax, t,
para->d_tmp_d_vec);*/ // tmp saves xs[t]
gpu_mmul(handle, para->d_dhraw,
inputs + t*M, // xs[t]
n_hidden, 1, n_features,
para->d_W_tmp1, stream[1]); // Wtmp1 saves dhraw*xs[t]'
gpu_add(para->d_dWxh, para->d_W_tmp1, n_hidden*n_features,
para->d_dWxh, stream[1]);
// dWhh += dhraw * hs[t - 1].t();
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t - 1 + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t-1]
gpu_mmul(handle, para->d_dhraw,
para->d_hs + n_hidden*(t-1+1), // hs[t-1]
n_hidden, 1, n_hidden,
para->d_W_tmp2, stream[2]); // Wtmp1 saves dhraw*hs[t-1]'
gpu_add(para->d_dWhh, para->d_W_tmp2, n_hidden*n_hidden,
para->d_dWhh, stream[2]);
// dhnext = Whh.t() * dhraw;
gpu_mv(handle, para->d_Whh, para->d_dhraw, n_hidden, n_hidden,
para->d_dhnext, true);
hipStreamSynchronize(stream[0]);
hipStreamSynchronize(stream[1]);
hipStreamSynchronize(stream[2]);
}
}
// clip
gpu_clip(para->d_dWxh, n_hidden*n_features,-5.f, 5.f);
gpu_clip(para->d_dWhh, n_hidden*n_hidden,-5.f, 5.f);
gpu_clip(para->d_dWhy, n_hidden*n_output_classes,-5.f, 5.f);
gpu_clip(para->d_dbh, n_hidden,-5.f, 5.f);
gpu_clip(para->d_dby, n_output_classes,-5.f, 5.f);
}
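// CPU reference for the recurrence that gpu_tanh_Mv_add_Mv_add_v fuses on the device:
// hs[t] = tanh(Wxh * xs[t] + Whh * hs[t-1] + bh). This is only a sketch for clarity,
// assuming column-major weight storage like the cuBLAS-backed device buffers; the
// function name is hypothetical and is not called anywhere in this file.
static void rnn_forward_step_cpu_sketch(const float* Wxh, const float* Whh,
                                        const float* bh, const float* xt,
                                        const float* h_prev, float* h_out,
                                        int n_hidden, int n_features)
{
	for (int i = 0; i < n_hidden; ++i) {
		float acc = bh[i];
		for (int j = 0; j < n_features; ++j)
			acc += Wxh[j * n_hidden + i] * xt[j];      // column-major Wxh
		for (int j = 0; j < n_hidden; ++j)
			acc += Whh[j * n_hidden + i] * h_prev[j];  // column-major Whh
		h_out[i] = tanhf(acc);
	}
}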
void score2onehot(float score,
int & idx1_targets, int n_output_classes, float score_min, float score_max)
{
float part = 1.0f / n_output_classes;
float pos = (score - score_min) / (score_max - score_min + pow(10, -4));
idx1_targets = floor(pos / part);
//// init onehot with 0
//gpu_fill(onehot, n_output_classes, 0.f);
//// set 1
//onehot[idx1_targets] = 1.f;
//gpu_fill_onehot(onehot, n_output_classes, idx1_targets);
return;
}
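// Worked example of the mapping above (same numbers as the commented-out call in
// test_gpu_fns below): n_output_classes = 10, score_min = 6.0, score_max = 8.9,
// score = 7.0:
//   part = 1 / 10 = 0.1
//   pos  = (7.0 - 6.0) / (8.9 - 6.0 + 1e-4) ~= 0.3448
//   idx1_targets = floor(0.3448 / 0.1) = 3
// i.e. the score falls into one-hot class index 3 out of 10.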
void sgd(hipblasHandle_t handle, Para* para)
{
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
sgd0(handle, para->d_Wxh, para->d_dWxh, n_hidden*n_features, alpha);
sgd0(handle, para->d_Whh, para->d_dWhh, n_hidden*n_hidden, alpha);
sgd0(handle, para->d_Why, para->d_dWhy, n_hidden*n_output_classes, alpha);
sgd0(handle, para->d_bh, para->d_dbh, n_hidden, alpha);
sgd0(handle, para->d_by, para->d_dby, n_output_classes, alpha);
}
void sgd0(hipblasHandle_t handle, float * P, float * dP,
int size, float alpha)
{
// P = - alpha * dP + P
// hipblasSaxpy: y = a * x + y
//hipblasStatus_t stat;
float a = -alpha;
hipblasSaxpy(handle,
size, // num of elems in P or dP
&a,
dP, 1,
P, 1); // res into P
//cout << "stat: " << stat << endl;
hipDeviceSynchronize();
// a synchronization must follow the cuBLAS call, otherwise data-race errors occur
}
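// CPU equivalent of the hipblasSaxpy update above (sketch for clarity only, not used):
// with a = -alpha, y = a * x + y becomes P[i] = P[i] - alpha * dP[i], i.e. one plain
// gradient-descent step on every parameter element.
static void sgd0_cpu_sketch(float* P, const float* dP, int size, float alpha)
{
	for (int i = 0; i < size; ++i)
		P[i] -= alpha * dP[i];
}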
void test_gpu_fns()
{
hipblasHandle_t handle;
hipblasCreate(&handle);
// ===========================================
const int M = 10;
const int K = 11;
const int N = 70;
const int size1 = M*K;
const int size2 = K*N;
const int size3 = M*N;
float* d_in1 = NULL;
float* d_in2 = NULL;
float* d_out = NULL;
float* d_x;
float* d_x2;
float* d_x3;
hipMallocManaged((void**)&d_in1, size1 * sizeof(float));
hipMallocManaged((void**)&d_in2, size2 * sizeof(float));
hipMallocManaged((void**)&d_out, size3 * sizeof(float));
hipMallocManaged((void**)&d_x, K * sizeof(float));
hipMallocManaged((void**)&d_x2, K * sizeof(float));
hipMallocManaged((void**)&d_x3, K * sizeof(float));
hipDeviceSynchronize();
//printToHost(d_in1, M, K, "d_in1 initial");
//printToHost(d_in2, K, N, "d_in2 initial");
gpu_fill(d_in1, size1, 3.f);
gpu_fill_rand(d_in2, size2, 1, -0.1f, 0.1f, 111);
gpu_fill(d_x, K, 0.f); // d_x
gpu_fill_rand(d_x2, 1, K, -4.f, 4.f, 43);
gpu_fill_rand(d_x3, 1, K, 0.f, 1.f, 123);
//printToHost(d_in1, M, K, "in1");
//printToHost(d_in2, K, N, "rand2");
d_x[1] = 0;
//printToHost(d_x, 1, K, "x");
printToHost(d_x2, 1, K, "x2");
//printToHost(d_x3, 1, K, "x3");
// --------- gpu_clip :)-------------
/*gpu_clip(d_x2, K, -1.f, 1.f);
printToHost(d_x2, 1, K, "clip x2");*/
// ------ gpu_max_value -----------
/*float* cache;
hipMallocManaged((void**)&cache, K * sizeof(float));
printToHost(cache, 1, K, "init cache");
float x2_max = gpu_max_value(d_x2, K, cache);
cout << "max val of x2: " << x2_max << endl;
int idx = gpu_max_index(d_x2, K, cache);
cout << "index of max val of x2: " << idx << endl;*/
// ----------- gpu_sum -------------
/*float* cache;
hipMallocManaged((void**)&cache, K * sizeof(float));
printToHost(cache, 1, K, "init cache");
float s = gpu_sum(d_x, K, cache);
cout << "sum of x: " << s << endl;
printToHost(cache, 1, K+5, "cache");*/
// --------- gpu_softmax -----------
/*float* soft;
hipMallocManaged((void**)&soft, K * sizeof(float));
gpu_softmax(d_x, K, soft, cache);
printToHost(soft, 1, K, "softmax of x");*/
// ------------ gpu_scal -----------
/*float* dest;
hipMallocManaged((void**)&dest, K * sizeof(float));
gpu_scal(d_x2, K, 0.1, dest);
printToHost(dest, 1, K, "scal of x2");*/
// -------- gpu_tanh_add_add :) --------------
/*float* dest;
hipMallocManaged((void**)&dest, K * sizeof(float));
gpu_tanh_add_add(d_x, d_x2, d_x3, K, dest);
printToHost(dest, 1, K, "tanh(v1+v2+v3)");*/
// ------------ gpu_tanh :)------------------
/*float* res_tanh;
hipMallocManaged((void**)&res_tanh, K * sizeof(float));
gpu_tanh(d_x, K, res_tanh);
printToHost(res_tanh, 1, K, "tanh(x)");*/
// ----------- gpu_mul_elemwise :)--------
//float* mul;
//hipMallocManaged((void**)&mul, M*K * sizeof(float));
//gpu_mul_elemwise(d_in1, d_in1, M*K, mul);
//printToHost(mul, M, K, "mul.");
// ----------- gpu_add :) --------------------
/*float* add;
hipMallocManaged((void**)&add, M*K * sizeof(float));
gpu_add(d_in1, d_in1, M*K, add);
printToHost(add, M, K, "add");*/
// -------------- gpu_mmul :)--------------
/*gpu_mmul(handle, d_in1, d_in2, M, K, N, d_out);
printToHost(d_out, M, N, "in1 * in2");*/
// -------------- gpu_mv :)--------------
/*float* Ax;
hipMallocManaged((void**)&Ax, M * sizeof(float));
gpu_mv(handle, d_in1, d_x, M, K, Ax, false);
printToHost(Ax, M, 1, "Ax");*/
// ------------ get get/set col :) -----------
/*float* col1;
hipMallocManaged((void**)&col1, M * sizeof(float));
gpu_get_col(d_in, M, N, 1, col1);
printToHost(col1, M, 1, "col1");
float* setVal;
hipMallocManaged((void**)&setVal, M, 1);
gpu_fill(setVal, M * 1, 2.3f);
gpu_set_col(d_in, M, N, 3, setVal);
printToHost(d_in, M, N, "set col3 to 2.3");*/
// --------- gpu_copy :) --------------
/*float* d_cp = NULL;
hipMallocManaged((void**)&d_cp, 2*M * sizeof(float));
gpu_copy(d_cp, 5, d_in, 0, M);
printToHost(d_cp, M, 2, "copy first col of d_in");*/
// ----------- score2onehot :) -------
/*int idx1;
float* onehot = score2onehot(7.0f, idx1, 10, 6.0, 8.9);
cout << "idx1: " << idx1 << endl;
for (int i = 0; i < 10; i++)
{
cout << onehot[i] << " " << endl;
}
cout << endl;*/
// ------ gpu_clear_arr -----------
//gpu_clear_arr(d_in, size);
//printToHost(d_in, M, N, "clear to 0");
// ------ fill rand :) --------
/*gpu_fill_rand(d_in, size, 1, -0.1f, 0.1f, 11);
printToHost(d_in, M, N, "rand");
// ------- gpu_copy(); :) ---------------
float* d_cp = NULL;
hipMallocManaged((void**)&d_cp, 3 * sizeof(float));
gpu_copy(d_cp, d_in, 1, 4);
printToHost(d_cp, 1, 3, "copy");
// ---------- gpu_fill(); :)-----------
gpu_fill(d_in, size, 2.3);
printToHost(d_in, M, N, "fill with 2.3");
// free
hipFree(d_cp);*/
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out);
hipblasDestroy(handle);
}
void initPara(Para * para, int Nmax)
{
// malloc
hipHostMalloc((void**)¶->h_total_epoches, sizeof(float));
hipHostMalloc((void**)¶->h_n_features, sizeof(float));
hipHostMalloc((void**)¶->h_n_hidden, sizeof(float));
hipHostMalloc((void**)¶->h_n_output_classes, sizeof(float));
hipHostMalloc((void**)¶->h_alpha, sizeof(float));
hipHostMalloc((void**)¶->h_score_min, sizeof(float));
hipHostMalloc((void**)¶->h_score_max, sizeof(float));
hipMalloc((void**)¶->d_total_epoches, sizeof(float));
hipMalloc((void**)¶->d_n_features, sizeof(float));
hipMalloc((void**)¶->d_n_hidden, sizeof(float));
hipMalloc((void**)¶->d_n_output_classes, sizeof(float));
hipMalloc((void**)¶->d_alpha, sizeof(float));
hipMalloc((void**)¶->d_score_min, sizeof(float));
hipMalloc((void**)¶->d_score_max, sizeof(float));
// malloc dP, only in GPU needed
hipMalloc((void**)¶->d_dWxh, n_hidden*n_features * sizeof(float));
hipMalloc((void**)¶->d_dWhh, n_hidden*n_hidden * sizeof(float));
hipMalloc((void**)¶->d_dWhy, n_hidden*n_output_classes * sizeof(float));
hipMalloc((void**)¶->d_dbh, n_hidden * sizeof(float));
hipMalloc((void**)¶->d_dby, n_output_classes * sizeof(float));
hipMalloc((void**)¶->d_dhnext, n_hidden * sizeof(float));
hipMalloc((void**)¶->d_dy, n_output_classes * sizeof(float));
hipMalloc((void**)¶->d_dh, n_hidden * sizeof(float));
hipMalloc((void**)¶->d_dhraw, n_hidden * sizeof(float));
// malloc params of RNN
hipHostMalloc((void**)¶->h_Wxh, n_hidden*n_features * sizeof(float));
hipHostMalloc((void**)¶->h_Whh, n_hidden*n_hidden * sizeof(float));
hipHostMalloc((void**)¶->h_Why, n_hidden*n_output_classes * sizeof(float));
hipHostMalloc((void**)¶->h_bh, n_hidden * sizeof(float));
hipHostMalloc((void**)¶->h_by, n_output_classes * sizeof(float));
hipMalloc((void**)¶->d_Wxh, n_hidden*n_features * sizeof(float));
hipMalloc((void**)¶->d_Whh, n_hidden*n_hidden * sizeof(float));
hipMalloc((void**)¶->d_Why, n_hidden*n_output_classes * sizeof(float));
hipMalloc((void**)¶->d_bh, n_hidden * sizeof(float));
hipMalloc((void**)¶->d_by, n_output_classes * sizeof(float));
// malloc state, only in GPU needed
hipMalloc((void**)¶->d_xs, n_features*Nmax * sizeof(float));
hipMalloc((void**)¶->d_hs, n_hidden*(Nmax + 1) * sizeof(float));
hipMalloc((void**)¶->d_ys, n_output_classes*Nmax * sizeof(float));
hipMalloc((void**)¶->d_ps, n_output_classes*Nmax * sizeof(float));
hipMallocManaged((void**)¶->d_Nmax, sizeof(float));
para->d_Nmax[0] = Nmax;
// malloc cache
int sum1 = n_features + n_features + n_output_classes;
hipMalloc((void**)¶->d_tmp_d_vec, sum1 * sizeof(float));
hipMalloc((void**)¶->d_tmp_d_vec2, sum1 * sizeof(float));
hipMalloc((void**)¶->d_W_tmp1, sum1*sum1 * sizeof(float));
hipMalloc((void**)¶->d_W_tmp2, sum1*sum1 * sizeof(float));
hipMalloc((void**)¶->d_W_tmp3, sum1*sum1 * sizeof(float));
hipHostMalloc((void**)¶->h_cache, sum1*sum1 * sizeof(float));
}
void deInitPara(Para * para)
{
// free
hipHostFree(para->h_total_epoches);
hipHostFree(para->h_n_features);
hipHostFree(para->h_n_hidden);
hipHostFree(para->h_n_output_classes);
hipHostFree(para->h_alpha);
hipHostFree(para->h_score_min);
hipHostFree(para->h_score_max);
hipFree(para->d_total_epoches);
hipFree(para->d_n_features);
hipFree(para->d_n_hidden);
hipFree(para->d_n_output_classes);
hipFree(para->d_alpha);
hipFree(para->d_score_min);
hipFree(para->d_score_max);
// free dP, only GPU
hipFree(para->d_dWxh);
hipFree(para->d_dWhh);
hipFree(para->d_dWhy);
hipFree(para->d_dbh);
hipFree(para->d_dby);
hipFree(para->d_dhnext);
hipFree(para->d_dy);
hipFree(para->d_dh);
hipFree(para->d_dhraw);
// free params of RNN
hipHostFree(para->h_Wxh);
hipHostFree(para->h_Whh);
hipHostFree(para->h_Why);
hipHostFree(para->h_bh);
hipHostFree(para->h_by);
hipFree(para->d_Wxh);
hipFree(para->d_Whh);
hipFree(para->d_Why);
hipFree(para->d_bh);
hipFree(para->d_by);
// free sces
hipHostFree(para->h_sces_id_score);
hipHostFree(para->h_sces_data);
hipHostFree(para->h_sces_data_mn);
hipHostFree(para->h_sces_data_idx_begin);
hipHostFree(para->h_num_sces);
hipHostFree(para->h_total_epoches);
hipFree(para->d_sces_id_score);
hipFree(para->d_sces_data);
hipFree(para->d_sces_data_mn);
hipFree(para->d_sces_data_idx_begin);
hipFree(para->d_num_sces);
hipFree(para->d_total_epoches);
// free state, inly GPU
hipFree(para->d_xs);
hipFree(para->d_hs);
hipFree(para->d_ys);
hipFree(para->d_ps);
hipFree(para->d_Nmax);
// free cache, only GPU
int sum1 = n_features + n_features + n_output_classes;
hipFree(para->d_tmp_d_vec);
hipFree(para->d_tmp_d_vec2);
hipFree(para->d_W_tmp1);
hipFree(para->d_W_tmp2);
hipFree(para->d_W_tmp3);
}
|
e4fdbc5d72ceb11b234dc28aeb04a4e51eb77bb8.cu
|
#include "Cuda4RNN.h"
void trainMultiThread(
float* lossAllVec,
Para* para
)
{
int num_sces = para->h_num_sces[0];
int total_epoches = para->h_total_epoches[0];
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
float score_min = para->h_score_min[0];
float score_max = para->h_score_max[0];
{
cout << "train begins" << endl << "alpha: " << alpha << endl;
cout << "total_epoches: " << total_epoches << endl
<< "n_features: " << n_features << endl;
cout << "n_hidden: " << n_hidden << endl
<< "n_output_classes: " << n_output_classes << endl;
cout << "score_min: " << score_min << endl
<< "score_max: " << score_max << endl;
}
const int n_streams = 10;
cudaStream_t stream[n_streams];
for (int i = 0; i < n_streams; i++)
{
cudaStreamCreate(&stream[i]);
}
cublasHandle_t handle;
cublasCreate(&handle);
// use plain SGD first, with a single CPU thread
float* loss_one_epoch;
float* loss_mean_each_epoch;
float* true_false;
float* accuracy_each_epoch;
float loss = 0.f;
cudaMalloc((void**)&loss_one_epoch, total_epoches * sizeof(float));
cudaMalloc((void**)&loss_mean_each_epoch, total_epoches * sizeof(float));
cudaMalloc((void**)&true_false, total_epoches * sizeof(float));
cudaMalloc((void**)&accuracy_each_epoch, total_epoches * sizeof(float));
int total_size = para->h_total_size[0];
int Nmax = para->d_Nmax[0];
int M = para->h_sces_data_mn[0];
float* sce_item_data; // allocated with the same size as the scenario data
cudaMalloc((void**)&sce_item_data, total_size*sizeof(float));
float* hprev;
cudaMalloc((void**)&hprev, n_hidden * sizeof(float));
// total_epoches
for (int i = 0; i < 21; i++)
{
/**
loop over each scenario
*/
for(int item = 0; item < num_sces; item++)
{
// ---------- take one scenario's data and train the RNN ------------
float id0 = para->h_sces_id_score[item*2 + 0];
float score0 = para->h_sces_id_score[item*2 + 1];
int sce0_M = para->h_sces_data_mn[item * 2 + 0];
int sce0_N = para->h_sces_data_mn[item * 2 + 1];
int beginIdx = para->h_sces_data_idx_begin[item];
int endIdx = para->h_sces_data_idx_begin[item + 1];
//gpu_copy(sce_item_data, 0, para->d_sces_data, beginIdx, endIdx);
sce_item_data = para->d_sces_data + beginIdx;
// shallow copy: just alias the pointer into the scenario data
//gpu_clear_arr(true_false, total_epoches);//true_false.clear();
//gpu_clear_arr(loss_one_epoch, total_epoches);//loss_one_epoch.clear();
// set dP 0
gpu_fill(para->d_dWxh, n_hidden * n_features, 0.f);
gpu_fill(para->d_dWhh, n_hidden * n_hidden, 0.f);
gpu_fill(para->d_dWhy, n_hidden * n_output_classes, 0.f);
gpu_fill(para->d_dbh, n_hidden, 0.f);
gpu_fill(para->d_dby, n_output_classes, 0.f);
gpu_fill(hprev, n_hidden, 0.f);
lossFun(handle,
sce_item_data, sce0_M, sce0_N,
score0,
hprev, true_false,
loss,
para,
stream);
if (i % 10 == 0)
{
cout << "epoch: " << i << ", loss: " << loss << endl;
}
sgd(handle, para);
}
}
// lossVec mean, accu
// free resource
;{
deInitPara(para);
cudaFree(hprev);
cudaFree(sce_item_data);
cudaFree(loss_one_epoch);
cudaFree(loss_mean_each_epoch);
cudaFree(true_false);
cudaFree(accuracy_each_epoch);
cout << "free over in train fn \n";
}
cublasDestroy(handle);
for (int i = 0; i < n_streams; i++)
{
cudaStreamDestroy(stream[i]);
}
}
/*
arma::mat in RNN-v2 => device_vector.
inputs: data of a scenario
M: n_rows of orig. inputs. Currently M = 17, i.e. the number of signals
N: n_cols of orig. inputs. N is the number of rows of matDataZScore in MATLAB, i.e. the number of time steps
Note: the parameters include a struct; before calling this fn, allocate the struct with cudaMallocManaged
*/
void lossFun(
cublasHandle_t handle,
float* inputs, int M, int N,
float score,
float* hprev,
float* true_false,
float& loss,
Para* para,
cudaStream_t stream[]
)
{
int total_epoches = para->h_total_epoches[0];
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
float score_min = para->h_score_min[0];
float score_max = para->h_score_max[0];
int Nmax = para->d_Nmax[0]; // the largest N over all scenarios
int idx1_targets = -1;
score2onehot(score,
idx1_targets, n_output_classes, score_min, score_max);
// hs[-1] = hprev;
gpu_set_col(para->d_hs, n_hidden, Nmax, -1+1, hprev);
loss = 0.f;
// ---------------- forward pass -------------
for (int t = 0; t < N; t++)
{
// ----- xs[t] = inputs.row(t).t(); ----- xs just holds the inputs data, so no extra copy is needed
//gpu_get_col(inputs, M, N, t, para->d_tmp_d_vec); // tmp saves xs[t]
//gpu_set_col(para->d_xs, n_features, Nmax, t, para->d_tmp_d_vec);
// --- hs[t] = arma::tanh(Wxh * xs[t] + Whh*hs[t-1] + bh); ----
// Wxh * xs[t]
// hs[t-1]
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t - 1 + 1,
para->d_tmp_d_vec2);*/ // tmp2 saves hs[t-1]
// Whh * hs[t-1]
/*gpu_mv(handle, para->Whh, para->d_tmp_d_vec, n_hidden, n_hidden,
para->d_W_tmp2);*/ // W_tmp2 saves Whh*hs[t-1]
/*gpu_tanh_add_add(para->d_W_tmp1, para->d_W_tmp2, para->bh, n_hidden,
para->d_tmp_d_vec);*/ // tmp saves tanh_add_add
gpu_tanh_Mv_add_Mv_add_v(handle,
para->d_Wxh, n_hidden, n_features,
inputs + t*M, // xs[t]
para->d_Whh, n_hidden, n_hidden,
para->d_hs + n_hidden*(t-1+1), // hs[t-1]
para->d_bh,
para->d_hs + n_hidden*(t+1), // dest => hs[t]
para,
stream);
//gpu_set_col(para->d_hs, n_hidden, Nmax, t + 1, para->d_W_tmp3);
if (t == N-1)
{
// ys[t] = Why * hs[t] + by;
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t]
gpu_mv(handle, para->d_Why,
para->d_hs + n_hidden*(t+1), // hs[t]
n_output_classes, n_hidden,
para->d_W_tmp1); // Why * hs[t]
gpu_add(para->d_W_tmp1, para->d_by, n_output_classes,
para->d_ys + n_output_classes*t, stream[0]); // tmp saves ys[t]
cudaStreamSynchronize(stream[0]);
//gpu_set_col(para->d_ys, n_output_classes, Nmax, t, para->d_tmp_d_vec);
// ps[t] = softmax(ys[t])
int sum1 = n_features + n_features + n_output_classes;
gpu_clear_arr(para->d_W_tmp1, sum1*sum1);
gpu_get_col(para->d_ys, n_output_classes, Nmax, t, para->d_tmp_d_vec);
gpu_softmax(para->d_tmp_d_vec, n_output_classes,
para->d_W_tmp1, // dest => ps[t]
para->d_W_tmp2); // cache
gpu_set_col(para->d_ps, n_output_classes, Nmax, t,
para->d_W_tmp1); // d_W_tmp1 = softmax = ps[t]
// loss += -log(ps[t](idx1));
/*float val = para->d_W_tmp1[idx1_targets];
loss += -logf(val);*/
cudaMemcpy(para->h_cache, para->d_W_tmp1, sum1*sum1 * sizeof(float), cudaMemcpyDeviceToHost);
float val = para->h_cache[idx1_targets];
loss = -logf(val);
// idx_pred
gpu_max_index(para->d_W_tmp1, sum1*sum1, para->d_W_tmp2);
}
}
// ---------------- BPTT -------------
gpu_fill(para->d_dWxh, n_hidden*n_features, 0.f);
gpu_fill(para->d_dWhh, n_hidden*n_hidden, 0.f);
gpu_fill(para->d_dWhy, n_hidden*n_output_classes, 0.f);
gpu_fill(para->d_dbh, n_hidden, 0.f);
gpu_fill(para->d_dby, n_output_classes, 0.f);
gpu_fill(para->d_dhnext, n_hidden, 0.f); // init dhnext = 0
for (int t = N-1; t >= 0; t--)
{
if (t == N-1)
{
// dy = ps[t];
gpu_get_col(para->d_ps, n_output_classes, Nmax, t, para->d_dy);
// uvec fuvec = arma::find(targets == 1);
// dy[fuvec(0)] -= 1;
// ??? para->d_dy[idx1_targets] -= 1.f;
gpu_update_dy(para->d_dy, n_output_classes, idx1_targets);
// dWhy += dy * hs[t].t(); /// dy(10,1) * hs[t].t()(1,50) = (10,50)
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t]'
gpu_mmul(handle, para->d_dy,
para->d_hs+n_hidden*(t+1), // hs[t]'
n_output_classes, 1, n_hidden,
para->d_W_tmp1, stream[0]); // Wtmp1 saves dy*hs[t]'
gpu_add(para->d_dWhy, para->d_W_tmp1, n_output_classes*n_hidden,
para->d_dWhy, stream[0]);
// dby += dy;
gpu_add(para->d_dby, para->d_dy, n_output_classes, para->d_dby, stream[1]);
// dh = Why.t() * dy + dhnext;
gpu_mv(handle, para->d_Why, para->d_dy, n_output_classes, n_hidden,
para->d_W_tmp1, true); // Wtmp1 saves Why' * dy
gpu_add(para->d_W_tmp1, para->d_dhnext, n_hidden, para->d_dh, stream[2]);
cudaStreamSynchronize(stream[2]);
// dhraw = (1 - hs[t] % hs[t]) % dh; // mul elemwise
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec); */// tmp saves hs[t]
gpu_tanh_der_hs_dh(para->d_hs + n_hidden*(t+1), // hs[t]
para->d_dh, n_hidden,
para->d_dhraw);
// dbh += dhraw;
gpu_add(para->d_dbh, para->d_dhraw, n_hidden, para->d_dbh, stream[3]);
// dWxh += dhraw * xs[t].t(); // penalty term, only needs to be added once in the loop
/*gpu_get_col(para->d_xs, n_features, Nmax, t,
para->d_tmp_d_vec);*/ // tmp saves xs[t]
gpu_mmul(handle, para->d_dhraw,
inputs + t*M, // xs[t]
n_hidden, 1, n_features,
para->d_W_tmp1, stream[4]); // Wtmp1 saves dhraw*xs[t]'
gpu_add(para->d_dWxh, para->d_W_tmp1, n_hidden*n_features,
para->d_dWxh, stream[4]);
// dWhh += dhraw * hs[t - 1].t();
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t-1+1,
para->d_tmp_d_vec);*/ // tmp saves hs[t-1]
gpu_mmul(handle, para->d_dhraw,
para->d_hs + n_hidden * (t-1+1), // hs[t-1]
n_hidden, 1, n_hidden,
para->d_W_tmp1, stream[5]); // Wtmp1 saves dhraw*hs[t-1]'
gpu_add(para->d_dWhh, para->d_W_tmp1, n_hidden*n_hidden,
para->d_dWhh, stream[5]);
// dhnext = Whh.t() * dhraw;
gpu_mv(handle, para->d_Whh, para->d_dhraw, n_hidden, n_hidden,
para->d_dhnext, true);
for (int i = 0; i <= 5; i++)
{
cudaStreamSynchronize(stream[i]);
}
}
else
{
// dh = dhnext;
para->d_dh = para->d_dhnext;
// dhraw = (1 - hs[t] % hs[t]) % dh; // mul elemwise
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t + 1,
para->d_tmp_d_vec); */// tmp saves hs[t]
gpu_tanh_der_hs_dh(para->d_hs + n_hidden*(t+1), // hs[t]
para->d_dh, n_hidden,
para->d_dhraw);
// can be computed in parallel: dbh, dWxh, dWhh, dhnext
// dbh += dhraw;
gpu_add(para->d_dbh, para->d_dhraw, n_hidden, para->d_dbh, stream[0]);
// dWxh += dhraw * xs[t].t(); // penalty term, only needs to be added once in the loop
/*gpu_get_col(para->d_xs, n_features, Nmax, t,
para->d_tmp_d_vec);*/ // tmp saves xs[t]
gpu_mmul(handle, para->d_dhraw,
inputs + t*M, // xs[t]
n_hidden, 1, n_features,
para->d_W_tmp1, stream[1]); // Wtmp1 saves dhraw*xs[t]'
gpu_add(para->d_dWxh, para->d_W_tmp1, n_hidden*n_features,
para->d_dWxh, stream[1]);
// dWhh += dhraw * hs[t - 1].t();
/*gpu_get_col(para->d_hs, n_hidden, Nmax, t - 1 + 1,
para->d_tmp_d_vec);*/ // tmp saves hs[t-1]
gpu_mmul(handle, para->d_dhraw,
para->d_hs + n_hidden*(t-1+1), // hs[t-1]
n_hidden, 1, n_hidden,
para->d_W_tmp2, stream[2]); // Wtmp1 saves dhraw*hs[t-1]'
gpu_add(para->d_dWhh, para->d_W_tmp2, n_hidden*n_hidden,
para->d_dWhh, stream[2]);
// dhnext = Whh.t() * dhraw;
gpu_mv(handle, para->d_Whh, para->d_dhraw, n_hidden, n_hidden,
para->d_dhnext, true);
cudaStreamSynchronize(stream[0]);
cudaStreamSynchronize(stream[1]);
cudaStreamSynchronize(stream[2]);
}
}
// clip
gpu_clip(para->d_dWxh, n_hidden*n_features,-5.f, 5.f);
gpu_clip(para->d_dWhh, n_hidden*n_hidden,-5.f, 5.f);
gpu_clip(para->d_dWhy, n_hidden*n_output_classes,-5.f, 5.f);
gpu_clip(para->d_dbh, n_hidden,-5.f, 5.f);
gpu_clip(para->d_dby, n_output_classes,-5.f, 5.f);
}
void score2onehot(float score,
int & idx1_targets, int n_output_classes, float score_min, float score_max)
{
float part = 1.0f / n_output_classes;
float pos = (score - score_min) / (score_max - score_min + pow(10, -4));
idx1_targets = floor(pos / part);
//// init onehot with 0
//gpu_fill(onehot, n_output_classes, 0.f);
//// set 1
//onehot[idx1_targets] = 1.f;
//gpu_fill_onehot(onehot, n_output_classes, idx1_targets);
return;
}
void sgd(cublasHandle_t handle, Para* para)
{
int n_features = para->h_n_features[0];
int n_hidden = para->h_n_hidden[0];
int n_output_classes = para->h_n_output_classes[0];
float alpha = para->h_alpha[0];
sgd0(handle, para->d_Wxh, para->d_dWxh, n_hidden*n_features, alpha);
sgd0(handle, para->d_Whh, para->d_dWhh, n_hidden*n_hidden, alpha);
sgd0(handle, para->d_Why, para->d_dWhy, n_hidden*n_output_classes, alpha);
sgd0(handle, para->d_bh, para->d_dbh, n_hidden, alpha);
sgd0(handle, para->d_by, para->d_dby, n_output_classes, alpha);
}
void sgd0(cublasHandle_t handle, float * P, float * dP,
int size, float alpha)
{
// P = - alpha * dP + P
// cublasSaxpy: y = a * x + y
//cublasStatus_t stat;
float a = -alpha;
cublasSaxpy_v2(handle,
size, // num of elems in P or dP
&a,
dP, 1,
P, 1); // res into P
//cout << "stat: " << stat << endl;
cudaDeviceSynchronize();
// a synchronization must follow the cuBLAS call, otherwise data-race errors occur
}
void test_gpu_fns()
{
cublasHandle_t handle;
cublasCreate(&handle);
// ===========================================
const int M = 10;
const int K = 11;
const int N = 70;
const int size1 = M*K;
const int size2 = K*N;
const int size3 = M*N;
float* d_in1 = NULL;
float* d_in2 = NULL;
float* d_out = NULL;
float* d_x;
float* d_x2;
float* d_x3;
cudaMallocManaged((void**)&d_in1, size1 * sizeof(float));
cudaMallocManaged((void**)&d_in2, size2 * sizeof(float));
cudaMallocManaged((void**)&d_out, size3 * sizeof(float));
cudaMallocManaged((void**)&d_x, K * sizeof(float));
cudaMallocManaged((void**)&d_x2, K * sizeof(float));
cudaMallocManaged((void**)&d_x3, K * sizeof(float));
cudaDeviceSynchronize();
//printToHost(d_in1, M, K, "d_in1 initial");
//printToHost(d_in2, K, N, "d_in2 initial");
gpu_fill(d_in1, size1, 3.f);
gpu_fill_rand(d_in2, size2, 1, -0.1f, 0.1f, 111);
gpu_fill(d_x, K, 0.f); // d_x
gpu_fill_rand(d_x2, 1, K, -4.f, 4.f, 43);
gpu_fill_rand(d_x3, 1, K, 0.f, 1.f, 123);
//printToHost(d_in1, M, K, "in1");
//printToHost(d_in2, K, N, "rand2");
d_x[1] = 0;
//printToHost(d_x, 1, K, "x");
printToHost(d_x2, 1, K, "x2");
//printToHost(d_x3, 1, K, "x3");
// --------- gpu_clip :)-------------
/*gpu_clip(d_x2, K, -1.f, 1.f);
printToHost(d_x2, 1, K, "clip x2");*/
// ------ gpu_max_value -----------
/*float* cache;
cudaMallocManaged((void**)&cache, K * sizeof(float));
printToHost(cache, 1, K, "init cache");
float x2_max = gpu_max_value(d_x2, K, cache);
cout << "max val of x2: " << x2_max << endl;
int idx = gpu_max_index(d_x2, K, cache);
cout << "index of max val of x2: " << idx << endl;*/
// ----------- gpu_sum -------------
/*float* cache;
cudaMallocManaged((void**)&cache, K * sizeof(float));
printToHost(cache, 1, K, "init cache");
float s = gpu_sum(d_x, K, cache);
cout << "sum of x: " << s << endl;
printToHost(cache, 1, K+5, "cache");*/
// --------- gpu_softmax -----------
/*float* soft;
cudaMallocManaged((void**)&soft, K * sizeof(float));
gpu_softmax(d_x, K, soft, cache);
printToHost(soft, 1, K, "softmax of x");*/
// ------------ gpu_scal -----------
/*float* dest;
cudaMallocManaged((void**)&dest, K * sizeof(float));
gpu_scal(d_x2, K, 0.1, dest);
printToHost(dest, 1, K, "scal of x2");*/
// -------- gpu_tanh_add_add :) --------------
/*float* dest;
cudaMallocManaged((void**)&dest, K * sizeof(float));
gpu_tanh_add_add(d_x, d_x2, d_x3, K, dest);
printToHost(dest, 1, K, "tanh(v1+v2+v3)");*/
// ------------ gpu_tanh :)------------------
/*float* res_tanh;
cudaMallocManaged((void**)&res_tanh, K * sizeof(float));
gpu_tanh(d_x, K, res_tanh);
printToHost(res_tanh, 1, K, "tanh(x)");*/
// ----------- gpu_mul_elemwise :)--------
//float* mul;
//cudaMallocManaged((void**)&mul, M*K * sizeof(float));
//gpu_mul_elemwise(d_in1, d_in1, M*K, mul);
//printToHost(mul, M, K, "mul.");
// ----------- gpu_add :) --------------------
/*float* add;
cudaMallocManaged((void**)&add, M*K * sizeof(float));
gpu_add(d_in1, d_in1, M*K, add);
printToHost(add, M, K, "add");*/
// -------------- gpu_mmul :)--------------
/*gpu_mmul(handle, d_in1, d_in2, M, K, N, d_out);
printToHost(d_out, M, N, "in1 * in2");*/
// -------------- gpu_mv :)--------------
/*float* Ax;
cudaMallocManaged((void**)&Ax, M * sizeof(float));
gpu_mv(handle, d_in1, d_x, M, K, Ax, false);
printToHost(Ax, M, 1, "Ax");*/
// ------------ get get/set col :) -----------
/*float* col1;
cudaMallocManaged((void**)&col1, M * sizeof(float));
gpu_get_col(d_in, M, N, 1, col1);
printToHost(col1, M, 1, "col1");
float* setVal;
	cudaMallocManaged((void**)&setVal, M * sizeof(float));
gpu_fill(setVal, M * 1, 2.3f);
gpu_set_col(d_in, M, N, 3, setVal);
printToHost(d_in, M, N, "set col3 to 2.3");*/
// --------- gpu_copy :) --------------
/*float* d_cp = NULL;
cudaMallocManaged((void**)&d_cp, 2*M * sizeof(float));
gpu_copy(d_cp, 5, d_in, 0, M);
printToHost(d_cp, M, 2, "copy first col of d_in");*/
// ----------- score2onehot :) -------
/*int idx1;
float* onehot = score2onehot(7.0f, idx1, 10, 6.0, 8.9);
cout << "idx1: " << idx1 << endl;
for (int i = 0; i < 10; i++)
{
cout << onehot[i] << " " << endl;
}
cout << endl;*/
// ------ gpu_clear_arr -----------
//gpu_clear_arr(d_in, size);
//printToHost(d_in, M, N, "clear to 0");
// ------ fill rand :) --------
/*gpu_fill_rand(d_in, size, 1, -0.1f, 0.1f, 11);
printToHost(d_in, M, N, "rand");
// ------- gpu_copy(); :) ---------------
float* d_cp = NULL;
cudaMallocManaged((void**)&d_cp, 3 * sizeof(float));
gpu_copy(d_cp, d_in, 1, 4);
printToHost(d_cp, 1, 3, "copy");
// ---------- gpu_fill(); :)-----------
gpu_fill(d_in, size, 2.3);
printToHost(d_in, M, N, "fill with 2.3");
// free
cudaFree(d_cp);*/
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out);
cublasDestroy(handle);
}
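// Allocates the pinned host buffers and device buffers used during training:
// hyper-parameters, RNN weights/biases and their gradients, the per-time-step
// state buffers (xs/hs/ys/ps, sized by Nmax) and scratch caches.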
void initPara(Para * para, int Nmax)
{
// malloc
cudaMallocHost((void**)¶->h_total_epoches, sizeof(float));
cudaMallocHost((void**)¶->h_n_features, sizeof(float));
cudaMallocHost((void**)¶->h_n_hidden, sizeof(float));
cudaMallocHost((void**)¶->h_n_output_classes, sizeof(float));
cudaMallocHost((void**)¶->h_alpha, sizeof(float));
cudaMallocHost((void**)¶->h_score_min, sizeof(float));
cudaMallocHost((void**)¶->h_score_max, sizeof(float));
cudaMalloc((void**)¶->d_total_epoches, sizeof(float));
cudaMalloc((void**)¶->d_n_features, sizeof(float));
cudaMalloc((void**)¶->d_n_hidden, sizeof(float));
cudaMalloc((void**)¶->d_n_output_classes, sizeof(float));
cudaMalloc((void**)¶->d_alpha, sizeof(float));
cudaMalloc((void**)¶->d_score_min, sizeof(float));
cudaMalloc((void**)¶->d_score_max, sizeof(float));
// malloc dP, only in GPU needed
cudaMalloc((void**)¶->d_dWxh, n_hidden*n_features * sizeof(float));
cudaMalloc((void**)¶->d_dWhh, n_hidden*n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_dWhy, n_hidden*n_output_classes * sizeof(float));
cudaMalloc((void**)¶->d_dbh, n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_dby, n_output_classes * sizeof(float));
cudaMalloc((void**)¶->d_dhnext, n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_dy, n_output_classes * sizeof(float));
cudaMalloc((void**)¶->d_dh, n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_dhraw, n_hidden * sizeof(float));
// malloc params of RNN
cudaMallocHost((void**)¶->h_Wxh, n_hidden*n_features * sizeof(float));
cudaMallocHost((void**)¶->h_Whh, n_hidden*n_hidden * sizeof(float));
cudaMallocHost((void**)¶->h_Why, n_hidden*n_output_classes * sizeof(float));
cudaMallocHost((void**)¶->h_bh, n_hidden * sizeof(float));
cudaMallocHost((void**)¶->h_by, n_output_classes * sizeof(float));
cudaMalloc((void**)¶->d_Wxh, n_hidden*n_features * sizeof(float));
cudaMalloc((void**)¶->d_Whh, n_hidden*n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_Why, n_hidden*n_output_classes * sizeof(float));
cudaMalloc((void**)¶->d_bh, n_hidden * sizeof(float));
cudaMalloc((void**)¶->d_by, n_output_classes * sizeof(float));
// malloc state, only in GPU needed
cudaMalloc((void**)¶->d_xs, n_features*Nmax * sizeof(float));
cudaMalloc((void**)¶->d_hs, n_hidden*(Nmax + 1) * sizeof(float));
cudaMalloc((void**)¶->d_ys, n_output_classes*Nmax * sizeof(float));
cudaMalloc((void**)¶->d_ps, n_output_classes*Nmax * sizeof(float));
cudaMallocManaged((void**)¶->d_Nmax, sizeof(float));
para->d_Nmax[0] = Nmax;
// malloc cache
int sum1 = n_features + n_features + n_output_classes;
cudaMalloc((void**)¶->d_tmp_d_vec, sum1 * sizeof(float));
cudaMalloc((void**)¶->d_tmp_d_vec2, sum1 * sizeof(float));
cudaMalloc((void**)¶->d_W_tmp1, sum1*sum1 * sizeof(float));
cudaMalloc((void**)¶->d_W_tmp2, sum1*sum1 * sizeof(float));
cudaMalloc((void**)¶->d_W_tmp3, sum1*sum1 * sizeof(float));
cudaMallocHost((void**)¶->h_cache, sum1*sum1 * sizeof(float));
}
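// Frees everything set up for training: hyper-parameters, gradients,
// weights/biases, the sces_* data buffers, per-time-step states and caches.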
void deInitPara(Para * para)
{
// free
cudaFreeHost(para->h_total_epoches);
cudaFreeHost(para->h_n_features);
cudaFreeHost(para->h_n_hidden);
cudaFreeHost(para->h_n_output_classes);
cudaFreeHost(para->h_alpha);
cudaFreeHost(para->h_score_min);
cudaFreeHost(para->h_score_max);
cudaFree(para->d_total_epoches);
cudaFree(para->d_n_features);
cudaFree(para->d_n_hidden);
cudaFree(para->d_n_output_classes);
cudaFree(para->d_alpha);
cudaFree(para->d_score_min);
cudaFree(para->d_score_max);
// free dP, only GPU
cudaFree(para->d_dWxh);
cudaFree(para->d_dWhh);
cudaFree(para->d_dWhy);
cudaFree(para->d_dbh);
cudaFree(para->d_dby);
cudaFree(para->d_dhnext);
cudaFree(para->d_dy);
cudaFree(para->d_dh);
cudaFree(para->d_dhraw);
// free params of RNN
cudaFreeHost(para->h_Wxh);
cudaFreeHost(para->h_Whh);
cudaFreeHost(para->h_Why);
cudaFreeHost(para->h_bh);
cudaFreeHost(para->h_by);
cudaFree(para->d_Wxh);
cudaFree(para->d_Whh);
cudaFree(para->d_Why);
cudaFree(para->d_bh);
cudaFree(para->d_by);
// free sces
cudaFreeHost(para->h_sces_id_score);
cudaFreeHost(para->h_sces_data);
cudaFreeHost(para->h_sces_data_mn);
cudaFreeHost(para->h_sces_data_idx_begin);
cudaFreeHost(para->h_num_sces);
cudaFree(para->d_sces_id_score);
cudaFree(para->d_sces_data);
cudaFree(para->d_sces_data_mn);
cudaFree(para->d_sces_data_idx_begin);
cudaFree(para->d_num_sces);
// free state, inly GPU
cudaFree(para->d_xs);
cudaFree(para->d_hs);
cudaFree(para->d_ys);
cudaFree(para->d_ps);
cudaFree(para->d_Nmax);
// free cache, only GPU
int sum1 = n_features + n_features + n_output_classes;
cudaFree(para->d_tmp_d_vec);
cudaFree(para->d_tmp_d_vec2);
cudaFree(para->d_W_tmp1);
cudaFree(para->d_W_tmp2);
cudaFree(para->d_W_tmp3);
}
|
055f4f76f549aaa7d8b8cd21f35376872cef480a.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Neural networks configuration parser.
// Created: 03/17/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/configurationparser.cuh"
NeuralNet* ConfigurationParser::ParseNetworkFromConfiguration(ParsingMode parsingMode, string configurationFile, string dataFolder, uint batchSize,
bool initializeLayersParams)
{
m_parsingMode = parsingMode;
m_dataFolder = dataFolder;
m_batchSize = batchSize;
m_initializeLayersParams = initializeLayersParams;
m_layersTiers.clear();
m_tiersLines.clear();
ParseTierLines(configurationFile);
FindMaxNetworkTierSize();
m_neuralNet = new NeuralNet(m_maxNetworkTierSize);
ParseLayersTiers();
// Reverting back to default device.
CudaAssert(hipSetDevice(0));
for (vector<Layer*>& layersTier: m_layersTiers)
{
m_neuralNet->AddLayersTier(layersTier);
}
return m_neuralNet;
}
string ConfigurationParser::TrimLine(string line)
{
if (line == "")
{
return line;
}
string trimmedLine;
// Trim leading whitespace.
size_t firstNonWs = line.find_first_not_of(" \t");
if (firstNonWs != string::npos)
{
trimmedLine = line.substr(firstNonWs);
}
// Trim trailing whitespace.
size_t lastNonWs = trimmedLine.find_last_not_of(" \t");
if (lastNonWs != string::npos)
{
trimmedLine = trimmedLine.substr(0, lastNonWs + 1);
}
return trimmedLine;
}
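// Groups the configuration file into per-tier line lists; every "layer:" line
// starts a new tier and blank lines are skipped.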
void ConfigurationParser::ParseTierLines(string configurationFile)
{
ifstream configuration(configurationFile);
string line;
vector<string> currTierLines;
bool encounteredFirstLayer = false;
while (getline(configuration, line))
{
string trimmedLine = TrimLine(line);
if (trimmedLine.find("layer:") == 0)
{
if (!currTierLines.empty() && encounteredFirstLayer)
{
m_tiersLines.push_back(currTierLines);
}
encounteredFirstLayer = true;
currTierLines.clear();
currTierLines.push_back(trimmedLine);
}
else if (trimmedLine != "")
{
currTierLines.push_back(trimmedLine);
}
}
if (!currTierLines.empty() && encounteredFirstLayer)
{
m_tiersLines.push_back(currTierLines);
}
}
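// The ParseParameter* helpers return true only when the line starts with the
// given parameter name; the value is whatever follows the last ':' on the line.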
bool ConfigurationParser::ParseParameterUint(string line, string parameterName, uint& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = stoi(TrimLine(lineValue));
return true;
}
bool ConfigurationParser::ParseParameterFloat(string line, string parameterName, float& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = stof(TrimLine(lineValue));
return true;
}
bool ConfigurationParser::ParseParameterString(string line, string parameterName, string& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = ConvertToLowercase(TrimLine(lineValue));
return true;
}
void ConfigurationParser::FindMaxNetworkTierSize()
{
m_maxNetworkTierSize = 1;
if (m_parsingMode == ParsingMode::Training)
{
for (vector<string>& tierLines : m_tiersLines)
{
for (string& line : tierLines)
{
uint tierSize = 1;
ParseParameterUint(line, "tierSize", tierSize);
m_maxNetworkTierSize = max((uint)m_maxNetworkTierSize, tierSize);
}
}
}
}
LayerType ConfigurationParser::GetLayerType(string layerTypeName)
{
if (layerTypeName == "input")
{
return LayerType::Input;
}
else if (layerTypeName == "convolutional")
{
return LayerType::Convolutional;
}
else if (layerTypeName == "responsenormalization")
{
return LayerType::ResponseNormalization;
}
else if (layerTypeName == "maxpool")
{
return LayerType::MaxPool;
}
else if (layerTypeName == "standard")
{
return LayerType::Standard;
}
else if (layerTypeName == "dropout")
{
return LayerType::Dropout;
}
else if (layerTypeName == "softmax")
{
return LayerType::SoftMax;
}
else if (layerTypeName == "output")
{
return LayerType::Output;
}
else
{
ShipAssert(false, "Unknown layer type name: " + layerTypeName);
return LayerType::Standard;
}
}
ActivationType ConfigurationParser::GetActivationType(string activationTypeName)
{
if (activationTypeName == "linear")
{
return ActivationType::Linear;
}
else if (activationTypeName == "relu")
{
return ActivationType::ReLu;
}
else if (activationTypeName == "sigmoid")
{
return ActivationType::Sigmoid;
}
else if (activationTypeName == "tanh")
{
return ActivationType::Tanh;
}
else
{
ShipAssert(false, "Unknown activation type name: " + activationTypeName);
return ActivationType::Linear;
}
}
LossFunctionType ConfigurationParser::GetLossFunctionType(string lossFunctionName)
{
if (lossFunctionName == "logisticregression")
{
return LossFunctionType::LogisticRegression;
}
else
{
ShipAssert(false, "Unknown loss function name: " + lossFunctionName);
return LossFunctionType::LogisticRegression;
}
}
DataType ConfigurationParser::GetDataType(string dataTypeName)
{
if (dataTypeName == "image")
{
return DataType::Image;
}
else if (dataTypeName == "text")
{
return DataType::Text;
}
else
{
ShipAssert(false, "Unknown data type name: " + dataTypeName);
return DataType::Text;
}
}
void ConfigurationParser::ParseLayersTiers()
{
for (size_t tierIndex = 0; tierIndex < m_tiersLines.size(); ++tierIndex)
{
vector<string>& tierLines = m_tiersLines[tierIndex];
string layerTypeName;
ParseParameterString(tierLines[0], "layer", layerTypeName);
LayerType tierLayerType = GetLayerType(layerTypeName);
if (tierIndex == 0)
{
ShipAssert(tierLayerType == LayerType::Input, "First layer in the network should be input layer!");
}
else if (tierIndex == m_tiersLines.size() - 1)
{
ShipAssert(tierLayerType == LayerType::Output, "Last layer in the network should be output layer!");
}
vector<Layer*> layerTier = ParseLayersTier(tierIndex, tierLayerType);
m_layersTiers.push_back(layerTier);
}
}
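// In prediction mode, or when the previous tier matches this tier's size and
// parallelism (and prevLayers is not "all"), a layer connects only to its peer
// in the previous tier; otherwise it connects to every layer of that tier.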
vector<Layer*> ConfigurationParser::FindPrevLayers(ParallelismMode currTierParallelismMode, uint layerIndex, uint currTierSize, size_t prevTierIndex, string prevLayersParam)
{
vector<Layer*> prevLayers;
if (m_parsingMode == ParsingMode::Prediction || (prevLayersParam != "all" && currTierParallelismMode == m_layersTiers[prevTierIndex][0]->GetParallelismMode() &&
currTierSize == m_layersTiers[prevTierIndex].size()))
{
prevLayers.push_back(m_layersTiers[prevTierIndex][layerIndex]);
}
else
{
prevLayers = m_layersTiers[prevTierIndex];
}
return prevLayers;
}
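// Decides whether a layer needs its own activation-gradients buffer: never in
// prediction mode or for the last tier, and not when gradients come back from
// a single matching peer in the next tier.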
bool ConfigurationParser::ShouldHoldActivationGradients(ParallelismMode currTierParallelismMode, uint currTierSize, size_t currTierIndex, uint layerIndex)
{
if (m_parsingMode == ParsingMode::Prediction || currTierIndex == m_tiersLines.size() - 1)
{
return false;
}
uint nextTierSize = 1;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode nextTierParallelismMode = ParallelismMode::Model;
bool parsedParallelism = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[currTierIndex + 1];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", nextTierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
if (parsedParallelism)
{
nextTierParallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
if ((nextTierSize == 1 && layerIndex == 0) || (prevLayersParam != "all" && currTierParallelismMode == nextTierParallelismMode && currTierSize == nextTierSize))
{
return false;
}
else
{
return true;
}
}
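// Derives a layer's input dimensions from its previous layers: channel counts
// are summed for convolutional-style inputs, data widths are summed otherwise,
// and holdsInputData is set when inputs must be gathered from more than one
// layer or from a peer at a different position in the previous tier.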
void ConfigurationParser::FindInputParams(vector<Layer*>& prevLayers, uint layerIndex, uint tierSize, ParallelismMode parallelismMode, uint& inputNumChannels,
uint& inputDataWidth, uint& inputDataHeight, uint& inputDataCount, bool& holdsInputData)
{
if (prevLayers[0]->GetLayerType() == LayerType::Input)
{
InputLayer* inputLayer = static_cast<InputLayer*>(prevLayers[0]);
inputNumChannels = inputLayer->GetActivationNumChannels();
inputDataWidth = inputLayer->GetActivationDataWidth();
inputDataHeight = inputLayer->GetActivationDataHeight();
if (parallelismMode == ParallelismMode::Data)
{
inputDataCount = inputLayer->GetInputDataCount() / tierSize;
}
else
{
inputDataCount = inputLayer->GetInputDataCount();
}
}
else if (prevLayers[0]->GetParallelismMode() == ParallelismMode::Data)
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
else if (prevLayers[0]->GetLayerType() == LayerType::Convolutional || prevLayers[0]->GetLayerType() == LayerType::ResponseNormalization ||
prevLayers[0]->GetLayerType() == LayerType::MaxPool)
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
for (size_t i = 1; i < prevLayers.size(); ++i)
{
inputNumChannels += prevLayers[i]->GetActivationNumChannels();
}
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
else
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
for (size_t i = 1; i < prevLayers.size(); ++i)
{
inputDataWidth += prevLayers[i]->GetActivationDataWidth();
}
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
holdsInputData = prevLayers.size() > 1 || (prevLayers[0]->GetLayerType() != LayerType::Input && prevLayers[0]->GetIndexInTier() != layerIndex);
}
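// The input tier is always a single InputLayer placed on device 0; the number
// of parallel inputs it prepares depends on whether the next tier is data-parallel.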
void ConfigurationParser::ParseInputLayerTier(vector<Layer*>& outLayerTier)
{
string dataTypeValue;
DataType dataType;
bool parsedDataType = false;
uint numChannels;
bool parsedNumChannels = false;
uint inputDataWidth;
bool parsedInputDataWidth = false;
uint inputDataHeight;
bool parsedInputDataHeight = false;
uint trainDataWidth = 0;
bool parsedTrainDataWidth = false;
uint trainDataHeight = 0;
bool parsedTrainDataHeight = false;
uint numTestPatches;
bool parsedNumTestPatches = false;
bool testOnFlips;
string testOnFlipsValue;
bool parsedTestOnFlips = false;
vector<string>& tierLines = m_tiersLines[0];
for (string& line : tierLines)
{
parsedDataType = parsedDataType || ParseParameterString(line, "data", dataTypeValue);
parsedNumChannels = parsedNumChannels || ParseParameterUint(line, "numChannels", numChannels);
parsedInputDataWidth = parsedInputDataWidth || ParseParameterUint(line, "inputDataWidth", inputDataWidth);
parsedInputDataHeight = parsedInputDataHeight || ParseParameterUint(line, "inputDataHeight", inputDataHeight);
parsedTrainDataWidth = parsedTrainDataWidth || ParseParameterUint(line, "trainDataWidth", trainDataWidth);
parsedTrainDataHeight = parsedTrainDataHeight || ParseParameterUint(line, "trainDataHeight", trainDataHeight);
parsedNumTestPatches = parsedNumTestPatches || ParseParameterUint(line, "numTestPatches", numTestPatches);
parsedTestOnFlips = parsedTestOnFlips || ParseParameterString(line, "testOnFlips", testOnFlipsValue);
}
ShipAssert(parsedDataType, "Can't parse data type for Input layer!");
ShipAssert(parsedNumChannels, "Can't parse number of channels for Input layer!");
ShipAssert(parsedInputDataWidth, "Can't parse input data width for Input layer!");
ShipAssert(parsedInputDataHeight, "Can't parse input data height for Input layer!");
if (dataTypeValue == "image")
{
ShipAssert(parsedTrainDataWidth, "Can't parse train data width for Input layer!");
ShipAssert(parsedTrainDataHeight, "Can't parse train data height for Input layer!");
}
ShipAssert(parsedNumTestPatches, "Can't parse number of test patches for Input layer!");
ShipAssert(parsedTestOnFlips, "Can't parse should we test on flips for Input layer!");
testOnFlips = testOnFlipsValue == "yes" ? true : false;
dataType = GetDataType(dataTypeValue);
// Finding number of inputs.
	ShipAssert(m_tiersLines.size() > 1, "We need more than just an input layer to train a network, you know...");
uint nextTierSize = 1;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode nextTierParallelismMode = ParallelismMode::Model;
bool parsedParallelism = false;
vector<string>& nextTierLines = m_tiersLines[1];
for (string& line : nextTierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", nextTierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
}
if (parsedParallelism)
{
nextTierParallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
uint numInputs = nextTierParallelismMode == ParallelismMode::Data ? nextTierSize : 1;
CudaAssert(hipSetDevice(0));
outLayerTier.push_back(new InputLayer(m_dataFolder, dataType, m_neuralNet->GetDeviceMemoryStreams(), numChannels, inputDataWidth, inputDataHeight,
(uint)m_maxNetworkTierSize * m_batchSize, trainDataWidth, trainDataHeight, m_parsingMode == ParsingMode::Training ? numInputs : 1, numTestPatches, testOnFlips));
}
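// Each of the Parse*LayerTier functions below follows the same pattern: parse
// the tier's parameters, assert that the required ones were found, then create
// one layer per tier slot, placing layer i on device i and wiring it to its
// previous layers.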
void ConfigurationParser::ParseConvolutionalLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint numFilters;
bool parsedNumFilters = false;
uint filterWidth;
bool parsedFilterWidth = false;
uint filterHeight;
bool parsedFilterHeight = false;
float weightsDeviation;
bool parsedWeightsDeviation = false;
float weightsMomentum;
bool parsedWeightsMomentum = false;
float weightsDecay;
bool parsedWeightsDecay = false;
float weightsStartingLR;
bool parsedWeightsStartingLR = false;
float weightsLRStep;
bool parsedWeightsLRStep = false;
float weightsLRFactor;
bool parsedWeightsLRFactor = false;
float biasesInitialValue;
bool parsedBiasesInitialValue = false;
float biasesMomentum;
bool parsedBiasesMomentum = false;
float biasesDecay;
bool parsedBiasesDecay = false;
float biasesStartingLR;
bool parsedBiasesStartingLR = false;
float biasesLRStep;
bool parsedBiasesLRStep = false;
float biasesLRFactor;
bool parsedBiasesLRFactor = false;
uint paddingX;
bool parsedPaddingX = false;
uint paddingY;
bool parsedPaddingY = false;
uint stride;
bool parsedStride = false;
string activationTypeValue;
ActivationType activationType;
bool parsedActivationType = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedNumFilters = parsedNumFilters || ParseParameterUint(line, "numFilters", numFilters);
parsedFilterWidth = parsedFilterWidth || ParseParameterUint(line, "filterWidth", filterWidth);
parsedFilterHeight = parsedFilterHeight || ParseParameterUint(line, "filterHeight", filterHeight);
parsedWeightsDeviation = parsedWeightsDeviation || ParseParameterFloat(line, "weightsDeviation", weightsDeviation);
parsedWeightsMomentum = parsedWeightsMomentum || ParseParameterFloat(line, "weightsMomentum", weightsMomentum);
parsedWeightsDecay = parsedWeightsDecay || ParseParameterFloat(line, "weightsDecay", weightsDecay);
parsedWeightsStartingLR = parsedWeightsStartingLR || ParseParameterFloat(line, "weightsStartingLR", weightsStartingLR);
parsedWeightsLRStep = parsedWeightsLRStep || ParseParameterFloat(line, "weightsLRStep", weightsLRStep);
parsedWeightsLRFactor = parsedWeightsLRFactor || ParseParameterFloat(line, "weightsLRFactor", weightsLRFactor);
parsedBiasesInitialValue = parsedBiasesInitialValue || ParseParameterFloat(line, "biasesInitialValue", biasesInitialValue);
parsedBiasesMomentum = parsedBiasesMomentum || ParseParameterFloat(line, "biasesMomentum", biasesMomentum);
parsedBiasesDecay = parsedBiasesDecay || ParseParameterFloat(line, "biasesDecay", biasesDecay);
parsedBiasesStartingLR = parsedBiasesStartingLR || ParseParameterFloat(line, "biasesStartingLR", biasesStartingLR);
parsedBiasesLRStep = parsedBiasesLRStep || ParseParameterFloat(line, "biasesLRStep", biasesLRStep);
parsedBiasesLRFactor = parsedBiasesLRFactor || ParseParameterFloat(line, "biasesLRFactor", biasesLRFactor);
parsedPaddingX = parsedPaddingX || ParseParameterUint(line, "paddingX", paddingX);
parsedPaddingY = parsedPaddingY || ParseParameterUint(line, "paddingY", paddingY);
parsedStride = parsedStride || ParseParameterUint(line, "stride", stride);
parsedActivationType = parsedActivationType || ParseParameterString(line, "activationType", activationTypeValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Convolutional layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Convolutional layer!");
ShipAssert(parsedNumFilters, "Can't parse number of filters for Convolutional layer!");
ShipAssert(parsedFilterWidth, "Can't parse filter width for Convolutional layer!");
ShipAssert(parsedFilterHeight, "Can't parse filter height for Convolutional layer!");
ShipAssert(parsedWeightsDeviation, "Can't parse weights deviation for Convolutional layer!");
ShipAssert(parsedWeightsMomentum, "Can't parse weights momentum for Convolutional layer!");
ShipAssert(parsedWeightsDecay, "Can't parse weights decay for Convolutional layer!");
ShipAssert(parsedWeightsStartingLR, "Can't parse weights starting learning rate for Convolutional layer!");
ShipAssert(parsedWeightsLRStep, "Can't parse weights learning rate step for Convolutional layer!");
ShipAssert(parsedWeightsLRFactor, "Can't parse weights learning rate factor for Convolutional layer!");
ShipAssert(parsedBiasesInitialValue, "Can't parse biases initial value for Convolutional layer!");
ShipAssert(parsedBiasesMomentum, "Can't parse biases momentum for Convolutional layer!");
ShipAssert(parsedBiasesDecay, "Can't parse biases decay for Convolutional layer!");
ShipAssert(parsedBiasesStartingLR, "Can't parse biases starting learning rate for Convolutional layer!");
ShipAssert(parsedBiasesLRStep, "Can't parse biases learning rate step for Convolutional layer!");
	ShipAssert(parsedBiasesLRFactor, "Can't parse biases learning rate factor for Convolutional layer!");
ShipAssert(parsedPaddingX, "Can't parse horizontal padding for Convolutional layer!");
ShipAssert(parsedPaddingY, "Can't parse vertical padding for Convolutional layer!");
ShipAssert(parsedStride, "Can't parse stride for Convolutional layer!");
ShipAssert(parsedActivationType, "Can't parse activation type for Convolutional layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
activationType = GetActivationType(activationTypeValue);
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
ConvolutionalLayer* convLayer = new ConvolutionalLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight,
inputDataCount, holdsInputData, numFilters, filterWidth, filterHeight, inputNumChannels, m_initializeLayersParams, weightsDeviation,
m_initializeLayersParams, biasesInitialValue, weightsMomentum, weightsDecay, weightsLRStep, weightsStartingLR, weightsLRFactor,
biasesMomentum, biasesDecay, biasesLRStep, biasesStartingLR, biasesLRFactor, paddingX, paddingY, stride, activationType,
holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
convLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(convLayer);
}
outLayerTier.push_back(convLayer);
}
}
void ConfigurationParser::ParseResponseNormalizationLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint depth;
bool parsedDepth = false;
float bias;
bool parsedBias = false;
float alphaCoeff;
bool parsedAlphaCoeff = false;
float betaCoeff;
bool parsedBetaCoeff = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedDepth = parsedDepth || ParseParameterUint(line, "depth", depth);
parsedBias = parsedBias || ParseParameterFloat(line, "bias", bias);
parsedAlphaCoeff = parsedAlphaCoeff || ParseParameterFloat(line, "alphaCoeff", alphaCoeff);
parsedBetaCoeff = parsedBetaCoeff || ParseParameterFloat(line, "betaCoeff", betaCoeff);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Response Normalization layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Response Normalization layer!");
ShipAssert(parsedDepth, "Can't parse depth for Response Normalization layer!");
ShipAssert(parsedBias, "Can't parse bias for Response Normalization layer!");
ShipAssert(parsedAlphaCoeff, "Can't parse alpha coefficient for Response Normalization layer!");
ShipAssert(parsedBetaCoeff, "Can't parse beta coefficient for Response Normalization layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
ResponseNormalizationLayer* reNormLayer = new ResponseNormalizationLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount,
holdsInputData, depth, bias, alphaCoeff, betaCoeff, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
reNormLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(reNormLayer);
}
outLayerTier.push_back(reNormLayer);
}
}
void ConfigurationParser::ParseMaxPoolLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint unitWidth;
bool parsedUnitWidth = false;
uint unitHeight;
bool parsedUnitHeight = false;
uint paddingX;
bool parsedPaddingX = false;
uint paddingY;
bool parsedPaddingY = false;
uint unitStride;
bool parsedUnitStride = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedUnitWidth = parsedUnitWidth || ParseParameterUint(line, "unitWidth", unitWidth);
parsedUnitHeight = parsedUnitHeight || ParseParameterUint(line, "unitHeight", unitHeight);
parsedPaddingX = parsedPaddingX || ParseParameterUint(line, "paddingX", paddingX);
parsedPaddingY = parsedPaddingY || ParseParameterUint(line, "paddingY", paddingY);
parsedUnitStride = parsedUnitStride || ParseParameterUint(line, "unitStride", unitStride);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Max Pool layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Max Pool layer!");
ShipAssert(parsedUnitWidth, "Can't parse unit width for Max Pool layer!");
ShipAssert(parsedUnitHeight, "Can't parse unit height for Max Pool layer!");
ShipAssert(parsedPaddingX, "Can't parse padding X for Max Pool layer!");
ShipAssert(parsedPaddingY, "Can't parse padding Y for Max Pool layer!");
ShipAssert(parsedUnitStride, "Can't parse unit stride for Max Pool layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
MaxPoolLayer* maxPoolLayer = new MaxPoolLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData,
unitWidth, unitHeight, paddingX, paddingY, unitStride, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
maxPoolLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(maxPoolLayer);
}
outLayerTier.push_back(maxPoolLayer);
}
}
void ConfigurationParser::ParseStandardLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint numNeurons;
bool parsedNumNeurons = false;
float weightsDeviation;
bool parsedWeightsDeviation = false;
float weightsMomentum;
bool parsedWeightsMomentum = false;
float weightsDecay;
bool parsedWeightsDecay = false;
float weightsStartingLR;
bool parsedWeightsStartingLR = false;
float weightsLRStep;
bool parsedWeightsLRStep = false;
float weightsLRFactor;
bool parsedWeightsLRFactor = false;
float biasesInitialValue;
bool parsedBiasesInitialValue = false;
float biasesMomentum;
bool parsedBiasesMomentum = false;
float biasesDecay;
bool parsedBiasesDecay = false;
float biasesStartingLR;
bool parsedBiasesStartingLR = false;
float biasesLRStep;
bool parsedBiasesLRStep = false;
float biasesLRFactor;
bool parsedBiasesLRFactor = false;
string activationTypeValue;
ActivationType activationType;
bool parsedActivationType = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedNumNeurons = parsedNumNeurons || ParseParameterUint(line, "numNeurons", numNeurons);
parsedWeightsDeviation = parsedWeightsDeviation || ParseParameterFloat(line, "weightsDeviation", weightsDeviation);
parsedWeightsMomentum = parsedWeightsMomentum || ParseParameterFloat(line, "weightsMomentum", weightsMomentum);
parsedWeightsDecay = parsedWeightsDecay || ParseParameterFloat(line, "weightsDecay", weightsDecay);
parsedWeightsStartingLR = parsedWeightsStartingLR || ParseParameterFloat(line, "weightsStartingLR", weightsStartingLR);
parsedWeightsLRStep = parsedWeightsLRStep || ParseParameterFloat(line, "weightsLRStep", weightsLRStep);
parsedWeightsLRFactor = parsedWeightsLRFactor || ParseParameterFloat(line, "weightsLRFactor", weightsLRFactor);
parsedBiasesInitialValue = parsedBiasesInitialValue || ParseParameterFloat(line, "biasesInitialValue", biasesInitialValue);
parsedBiasesMomentum = parsedBiasesMomentum || ParseParameterFloat(line, "biasesMomentum", biasesMomentum);
parsedBiasesDecay = parsedBiasesDecay || ParseParameterFloat(line, "biasesDecay", biasesDecay);
parsedBiasesStartingLR = parsedBiasesStartingLR || ParseParameterFloat(line, "biasesStartingLR", biasesStartingLR);
parsedBiasesLRStep = parsedBiasesLRStep || ParseParameterFloat(line, "biasesLRStep", biasesLRStep);
parsedBiasesLRFactor = parsedBiasesLRFactor || ParseParameterFloat(line, "biasesLRFactor", biasesLRFactor);
parsedActivationType = parsedActivationType || ParseParameterString(line, "activationType", activationTypeValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Standard layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Standard layer!");
ShipAssert(parsedNumNeurons, "Can't parse number of neurons for Standard layer!");
ShipAssert(parsedWeightsDeviation, "Can't parse weights deviation for Standard layer!");
ShipAssert(parsedWeightsMomentum, "Can't parse weights momentum for Standard layer!");
ShipAssert(parsedWeightsDecay, "Can't parse weights decay for Standard layer!");
ShipAssert(parsedWeightsStartingLR, "Can't parse weights starting learning rate for Standard layer!");
ShipAssert(parsedWeightsLRStep, "Can't parse weights learning rate step for Standard layer!");
ShipAssert(parsedWeightsLRFactor, "Can't parse weights learning rate factor for Standard layer!");
ShipAssert(parsedBiasesInitialValue, "Can't parse biases initial value for Standard layer!");
ShipAssert(parsedBiasesMomentum, "Can't parse biases momentum for Standard layer!");
ShipAssert(parsedBiasesDecay, "Can't parse biases decay for Standard layer!");
ShipAssert(parsedBiasesStartingLR, "Can't parse biases starting learning rate for Standard layer!");
ShipAssert(parsedBiasesLRStep, "Can't parse biases learning rate step for Standard layer!");
	ShipAssert(parsedBiasesLRFactor, "Can't parse biases learning rate factor for Standard layer!");
ShipAssert(parsedActivationType, "Can't parse activation type for Standard layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
activationType = GetActivationType(activationTypeValue);
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
StandardLayer* standardLayer = new StandardLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], m_neuralNet->GetCublasHandles()[layerIndex], layerIndex, tierSize, inputNumChannels,
inputDataWidth, inputDataHeight, inputDataCount, holdsInputData, numNeurons, m_initializeLayersParams, weightsDeviation,
m_initializeLayersParams, biasesInitialValue, weightsMomentum, weightsDecay, weightsLRStep, weightsStartingLR, weightsLRFactor,
biasesMomentum, biasesDecay, biasesLRStep, biasesStartingLR, biasesLRFactor, activationType, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
standardLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(standardLayer);
}
outLayerTier.push_back(standardLayer);
}
}
void ConfigurationParser::ParseDropoutLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
float dropProbability;
bool parsedDropProbability = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedDropProbability = parsedDropProbability || ParseParameterFloat(line, "dropProbability", dropProbability);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Dropout layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Dropout layer!");
ShipAssert(parsedDropProbability, "Can't parse drop probability for Dropout layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
DropoutLayer* dropoutLayer = new DropoutLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], m_neuralNet->GetCurandStatesBuffers()[layerIndex], layerIndex, tierSize, inputNumChannels,
inputDataWidth, inputDataHeight, inputDataCount, holdsInputData, dropProbability, false, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
dropoutLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(dropoutLayer);
}
outLayerTier.push_back(dropoutLayer);
}
}
void ConfigurationParser::ParseSoftMaxLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for SoftMax layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for SoftMax layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(hipSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
SoftMaxLayer* softMaxLayer = new SoftMaxLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, holdsInputData);
for (Layer* prevLayer : prevLayers)
{
softMaxLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(softMaxLayer);
}
outLayerTier.push_back(softMaxLayer);
}
}
void ConfigurationParser::ParseOutputLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
string lossFunctionName;
LossFunctionType lossFunction;
bool parsedLossFunction = false;
uint numGuesses;
bool parsedNumGuesses = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedLossFunction = parsedLossFunction || ParseParameterString(line, "lossFunction", lossFunctionName);
parsedNumGuesses = parsedNumGuesses || ParseParameterUint(line, "numGuesses", numGuesses);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedLossFunction, "Can't parse loss function for Output layer!");
lossFunction = GetLossFunctionType(lossFunctionName);
uint tierSize = 1;
uint layerIndex = 0;
ParallelismMode parallelismMode = ParallelismMode::Model;
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
CudaAssert(hipSetDevice(0));
InputLayer* inputLayer = static_cast<InputLayer*>(m_layersTiers[0][0]);
OutputLayer* outputLayer = new OutputLayer(m_neuralNet->GetDeviceCalculationStreams()[0], m_neuralNet->GetDeviceMemoryStreams()[0],
inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, (uint)m_maxNetworkTierSize * inputDataCount, lossFunction, parsedNumGuesses, numGuesses,
inputLayer->GetNumTestPasses());
for (Layer* prevLayer : prevLayers)
{
outputLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(outputLayer);
}
outLayerTier.push_back(outputLayer);
}
vector<Layer*> ConfigurationParser::ParseLayersTier(size_t tierIndex, LayerType tierLayerType)
{
vector<Layer*> layerTier;
if (tierLayerType == LayerType::Input)
{
ParseInputLayerTier(layerTier);
}
else if (tierLayerType == LayerType::Convolutional)
{
ParseConvolutionalLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::ResponseNormalization)
{
ParseResponseNormalizationLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::MaxPool)
{
ParseMaxPoolLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Standard)
{
ParseStandardLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Dropout)
{
ParseDropoutLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::SoftMax)
{
ParseSoftMaxLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Output)
{
ParseOutputLayerTier(tierIndex, layerTier);
}
return layerTier;
}
|
055f4f76f549aaa7d8b8cd21f35376872cef480a.cu
|
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Neural networks configuration parser.
// Created: 03/17/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/configurationparser.cuh"
NeuralNet* ConfigurationParser::ParseNetworkFromConfiguration(ParsingMode parsingMode, string configurationFile, string dataFolder, uint batchSize,
bool initializeLayersParams)
{
m_parsingMode = parsingMode;
m_dataFolder = dataFolder;
m_batchSize = batchSize;
m_initializeLayersParams = initializeLayersParams;
m_layersTiers.clear();
m_tiersLines.clear();
ParseTierLines(configurationFile);
FindMaxNetworkTierSize();
m_neuralNet = new NeuralNet(m_maxNetworkTierSize);
ParseLayersTiers();
// Reverting back to default device.
CudaAssert(cudaSetDevice(0));
for (vector<Layer*>& layersTier: m_layersTiers)
{
m_neuralNet->AddLayersTier(layersTier);
}
return m_neuralNet;
}
string ConfigurationParser::TrimLine(string line)
{
if (line == "")
{
return line;
}
string trimmedLine;
// Trim leading whitespace.
size_t firstNonWs = line.find_first_not_of(" \t");
if (firstNonWs != string::npos)
{
trimmedLine = line.substr(firstNonWs);
}
// Trim trailing whitespace.
size_t lastNonWs = trimmedLine.find_last_not_of(" \t");
if (lastNonWs != string::npos)
{
trimmedLine = trimmedLine.substr(0, lastNonWs + 1);
}
return trimmedLine;
}
void ConfigurationParser::ParseTierLines(string configurationFile)
{
ifstream configuration(configurationFile);
string line;
vector<string> currTierLines;
bool encounteredFirstLayer = false;
while (getline(configuration, line))
{
string trimmedLine = TrimLine(line);
if (trimmedLine.find("layer:") == 0)
{
if (!currTierLines.empty() && encounteredFirstLayer)
{
m_tiersLines.push_back(currTierLines);
}
encounteredFirstLayer = true;
currTierLines.clear();
currTierLines.push_back(trimmedLine);
}
else if (trimmedLine != "")
{
currTierLines.push_back(trimmedLine);
}
}
if (!currTierLines.empty() && encounteredFirstLayer)
{
m_tiersLines.push_back(currTierLines);
}
}
bool ConfigurationParser::ParseParameterUint(string line, string parameterName, uint& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = stoi(TrimLine(lineValue));
return true;
}
bool ConfigurationParser::ParseParameterFloat(string line, string parameterName, float& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = stof(TrimLine(lineValue));
return true;
}
bool ConfigurationParser::ParseParameterString(string line, string parameterName, string& parameterValue)
{
if (line.find(parameterName) != 0)
{
return false;
}
size_t valuePosition = line.find_last_of(":");
ShipAssert(valuePosition != string::npos, "Can't parse parameter: " + parameterName + " from line: " + line);
string lineValue = line.substr(valuePosition + 1);
parameterValue = ConvertToLowercase(TrimLine(lineValue));
return true;
}
void ConfigurationParser::FindMaxNetworkTierSize()
{
m_maxNetworkTierSize = 1;
if (m_parsingMode == ParsingMode::Training)
{
for (vector<string>& tierLines : m_tiersLines)
{
for (string& line : tierLines)
{
uint tierSize = 1;
ParseParameterUint(line, "tierSize", tierSize);
m_maxNetworkTierSize = max((uint)m_maxNetworkTierSize, tierSize);
}
}
}
}
LayerType ConfigurationParser::GetLayerType(string layerTypeName)
{
if (layerTypeName == "input")
{
return LayerType::Input;
}
else if (layerTypeName == "convolutional")
{
return LayerType::Convolutional;
}
else if (layerTypeName == "responsenormalization")
{
return LayerType::ResponseNormalization;
}
else if (layerTypeName == "maxpool")
{
return LayerType::MaxPool;
}
else if (layerTypeName == "standard")
{
return LayerType::Standard;
}
else if (layerTypeName == "dropout")
{
return LayerType::Dropout;
}
else if (layerTypeName == "softmax")
{
return LayerType::SoftMax;
}
else if (layerTypeName == "output")
{
return LayerType::Output;
}
else
{
ShipAssert(false, "Unknown layer type name: " + layerTypeName);
return LayerType::Standard;
}
}
ActivationType ConfigurationParser::GetActivationType(string activationTypeName)
{
if (activationTypeName == "linear")
{
return ActivationType::Linear;
}
else if (activationTypeName == "relu")
{
return ActivationType::ReLu;
}
else if (activationTypeName == "sigmoid")
{
return ActivationType::Sigmoid;
}
else if (activationTypeName == "tanh")
{
return ActivationType::Tanh;
}
else
{
ShipAssert(false, "Unknown activation type name: " + activationTypeName);
return ActivationType::Linear;
}
}
LossFunctionType ConfigurationParser::GetLossFunctionType(string lossFunctionName)
{
if (lossFunctionName == "logisticregression")
{
return LossFunctionType::LogisticRegression;
}
else
{
ShipAssert(false, "Unknown loss function name: " + lossFunctionName);
return LossFunctionType::LogisticRegression;
}
}
DataType ConfigurationParser::GetDataType(string dataTypeName)
{
if (dataTypeName == "image")
{
return DataType::Image;
}
else if (dataTypeName == "text")
{
return DataType::Text;
}
else
{
ShipAssert(false, "Unknown data type name: " + dataTypeName);
return DataType::Text;
}
}
void ConfigurationParser::ParseLayersTiers()
{
for (size_t tierIndex = 0; tierIndex < m_tiersLines.size(); ++tierIndex)
{
vector<string>& tierLines = m_tiersLines[tierIndex];
string layerTypeName;
ParseParameterString(tierLines[0], "layer", layerTypeName);
LayerType tierLayerType = GetLayerType(layerTypeName);
if (tierIndex == 0)
{
ShipAssert(tierLayerType == LayerType::Input, "First layer in the network should be input layer!");
}
else if (tierIndex == m_tiersLines.size() - 1)
{
ShipAssert(tierLayerType == LayerType::Output, "Last layer in the network should be output layer!");
}
vector<Layer*> layerTier = ParseLayersTier(tierIndex, tierLayerType);
m_layersTiers.push_back(layerTier);
}
}
vector<Layer*> ConfigurationParser::FindPrevLayers(ParallelismMode currTierParallelismMode, uint layerIndex, uint currTierSize, size_t prevTierIndex, string prevLayersParam)
{
vector<Layer*> prevLayers;
if (m_parsingMode == ParsingMode::Prediction || (prevLayersParam != "all" && currTierParallelismMode == m_layersTiers[prevTierIndex][0]->GetParallelismMode() &&
currTierSize == m_layersTiers[prevTierIndex].size()))
{
prevLayers.push_back(m_layersTiers[prevTierIndex][layerIndex]);
}
else
{
prevLayers = m_layersTiers[prevTierIndex];
}
return prevLayers;
}
bool ConfigurationParser::ShouldHoldActivationGradients(ParallelismMode currTierParallelismMode, uint currTierSize, size_t currTierIndex, uint layerIndex)
{
if (m_parsingMode == ParsingMode::Prediction || currTierIndex == m_tiersLines.size() - 1)
{
return false;
}
uint nextTierSize = 1;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode nextTierParallelismMode = ParallelismMode::Model;
bool parsedParallelism = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[currTierIndex + 1];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", nextTierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
if (parsedParallelism)
{
nextTierParallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
if ((nextTierSize == 1 && layerIndex == 0) || (prevLayersParam != "all" && currTierParallelismMode == nextTierParallelismMode && currTierSize == nextTierSize))
{
return false;
}
else
{
return true;
}
}
void ConfigurationParser::FindInputParams(vector<Layer*>& prevLayers, uint layerIndex, uint tierSize, ParallelismMode parallelismMode, uint& inputNumChannels,
uint& inputDataWidth, uint& inputDataHeight, uint& inputDataCount, bool& holdsInputData)
{
if (prevLayers[0]->GetLayerType() == LayerType::Input)
{
InputLayer* inputLayer = static_cast<InputLayer*>(prevLayers[0]);
inputNumChannels = inputLayer->GetActivationNumChannels();
inputDataWidth = inputLayer->GetActivationDataWidth();
inputDataHeight = inputLayer->GetActivationDataHeight();
if (parallelismMode == ParallelismMode::Data)
{
inputDataCount = inputLayer->GetInputDataCount() / tierSize;
}
else
{
inputDataCount = inputLayer->GetInputDataCount();
}
}
else if (prevLayers[0]->GetParallelismMode() == ParallelismMode::Data)
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
else if (prevLayers[0]->GetLayerType() == LayerType::Convolutional || prevLayers[0]->GetLayerType() == LayerType::ResponseNormalization ||
prevLayers[0]->GetLayerType() == LayerType::MaxPool)
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
for (size_t i = 1; i < prevLayers.size(); ++i)
{
inputNumChannels += prevLayers[i]->GetActivationNumChannels();
}
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
else
{
inputNumChannels = prevLayers[0]->GetActivationNumChannels();
inputDataWidth = prevLayers[0]->GetActivationDataWidth();
for (size_t i = 1; i < prevLayers.size(); ++i)
{
inputDataWidth += prevLayers[i]->GetActivationDataWidth();
}
inputDataHeight = prevLayers[0]->GetActivationDataHeight();
inputDataCount = prevLayers[0]->GetActivationDataCount();
}
holdsInputData = prevLayers.size() > 1 || (prevLayers[0]->GetLayerType() != LayerType::Input && prevLayers[0]->GetIndexInTier() != layerIndex);
}
void ConfigurationParser::ParseInputLayerTier(vector<Layer*>& outLayerTier)
{
string dataTypeValue;
DataType dataType;
bool parsedDataType = false;
uint numChannels;
bool parsedNumChannels = false;
uint inputDataWidth;
bool parsedInputDataWidth = false;
uint inputDataHeight;
bool parsedInputDataHeight = false;
uint trainDataWidth = 0;
bool parsedTrainDataWidth = false;
uint trainDataHeight = 0;
bool parsedTrainDataHeight = false;
uint numTestPatches;
bool parsedNumTestPatches = false;
bool testOnFlips;
string testOnFlipsValue;
bool parsedTestOnFlips = false;
vector<string>& tierLines = m_tiersLines[0];
for (string& line : tierLines)
{
parsedDataType = parsedDataType || ParseParameterString(line, "data", dataTypeValue);
parsedNumChannels = parsedNumChannels || ParseParameterUint(line, "numChannels", numChannels);
parsedInputDataWidth = parsedInputDataWidth || ParseParameterUint(line, "inputDataWidth", inputDataWidth);
parsedInputDataHeight = parsedInputDataHeight || ParseParameterUint(line, "inputDataHeight", inputDataHeight);
parsedTrainDataWidth = parsedTrainDataWidth || ParseParameterUint(line, "trainDataWidth", trainDataWidth);
parsedTrainDataHeight = parsedTrainDataHeight || ParseParameterUint(line, "trainDataHeight", trainDataHeight);
parsedNumTestPatches = parsedNumTestPatches || ParseParameterUint(line, "numTestPatches", numTestPatches);
parsedTestOnFlips = parsedTestOnFlips || ParseParameterString(line, "testOnFlips", testOnFlipsValue);
}
ShipAssert(parsedDataType, "Can't parse data type for Input layer!");
ShipAssert(parsedNumChannels, "Can't parse number of channels for Input layer!");
ShipAssert(parsedInputDataWidth, "Can't parse input data width for Input layer!");
ShipAssert(parsedInputDataHeight, "Can't parse input data height for Input layer!");
if (dataTypeValue == "image")
{
ShipAssert(parsedTrainDataWidth, "Can't parse train data width for Input layer!");
ShipAssert(parsedTrainDataHeight, "Can't parse train data height for Input layer!");
}
ShipAssert(parsedNumTestPatches, "Can't parse number of test patches for Input layer!");
ShipAssert(parsedTestOnFlips, "Can't parse whether to test on flips for Input layer!");
testOnFlips = testOnFlipsValue == "yes" ? true : false;
dataType = GetDataType(dataTypeValue);
// Finding number of inputs.
ShipAssert(m_tiersLines.size() > 1, "We need to have more than input layer to train network, you know...");
uint nextTierSize = 1;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode nextTierParallelismMode = ParallelismMode::Model;
bool parsedParallelism = false;
vector<string>& nextTierLines = m_tiersLines[1];
for (string& line : nextTierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", nextTierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
}
if (parsedParallelism)
{
nextTierParallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
uint numInputs = nextTierParallelismMode == ParallelismMode::Data ? nextTierSize : 1;
CudaAssert(cudaSetDevice(0));
outLayerTier.push_back(new InputLayer(m_dataFolder, dataType, m_neuralNet->GetDeviceMemoryStreams(), numChannels, inputDataWidth, inputDataHeight,
(uint)m_maxNetworkTierSize * m_batchSize, trainDataWidth, trainDataHeight, m_parsingMode == ParsingMode::Training ? numInputs : 1, numTestPatches, testOnFlips));
}
void ConfigurationParser::ParseConvolutionalLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint numFilters;
bool parsedNumFilters = false;
uint filterWidth;
bool parsedFilterWidth = false;
uint filterHeight;
bool parsedFilterHeight = false;
float weightsDeviation;
bool parsedWeightsDeviation = false;
float weightsMomentum;
bool parsedWeightsMomentum = false;
float weightsDecay;
bool parsedWeightsDecay = false;
float weightsStartingLR;
bool parsedWeightsStartingLR = false;
float weightsLRStep;
bool parsedWeightsLRStep = false;
float weightsLRFactor;
bool parsedWeightsLRFactor = false;
float biasesInitialValue;
bool parsedBiasesInitialValue = false;
float biasesMomentum;
bool parsedBiasesMomentum = false;
float biasesDecay;
bool parsedBiasesDecay = false;
float biasesStartingLR;
bool parsedBiasesStartingLR = false;
float biasesLRStep;
bool parsedBiasesLRStep = false;
float biasesLRFactor;
bool parsedBiasesLRFactor = false;
uint paddingX;
bool parsedPaddingX = false;
uint paddingY;
bool parsedPaddingY = false;
uint stride;
bool parsedStride = false;
string activationTypeValue;
ActivationType activationType;
bool parsedActivationType = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedNumFilters = parsedNumFilters || ParseParameterUint(line, "numFilters", numFilters);
parsedFilterWidth = parsedFilterWidth || ParseParameterUint(line, "filterWidth", filterWidth);
parsedFilterHeight = parsedFilterHeight || ParseParameterUint(line, "filterHeight", filterHeight);
parsedWeightsDeviation = parsedWeightsDeviation || ParseParameterFloat(line, "weightsDeviation", weightsDeviation);
parsedWeightsMomentum = parsedWeightsMomentum || ParseParameterFloat(line, "weightsMomentum", weightsMomentum);
parsedWeightsDecay = parsedWeightsDecay || ParseParameterFloat(line, "weightsDecay", weightsDecay);
parsedWeightsStartingLR = parsedWeightsStartingLR || ParseParameterFloat(line, "weightsStartingLR", weightsStartingLR);
parsedWeightsLRStep = parsedWeightsLRStep || ParseParameterFloat(line, "weightsLRStep", weightsLRStep);
parsedWeightsLRFactor = parsedWeightsLRFactor || ParseParameterFloat(line, "weightsLRFactor", weightsLRFactor);
parsedBiasesInitialValue = parsedBiasesInitialValue || ParseParameterFloat(line, "biasesInitialValue", biasesInitialValue);
parsedBiasesMomentum = parsedBiasesMomentum || ParseParameterFloat(line, "biasesMomentum", biasesMomentum);
parsedBiasesDecay = parsedBiasesDecay || ParseParameterFloat(line, "biasesDecay", biasesDecay);
parsedBiasesStartingLR = parsedBiasesStartingLR || ParseParameterFloat(line, "biasesStartingLR", biasesStartingLR);
parsedBiasesLRStep = parsedBiasesLRStep || ParseParameterFloat(line, "biasesLRStep", biasesLRStep);
parsedBiasesLRFactor = parsedBiasesLRFactor || ParseParameterFloat(line, "biasesLRFactor", biasesLRFactor);
parsedPaddingX = parsedPaddingX || ParseParameterUint(line, "paddingX", paddingX);
parsedPaddingY = parsedPaddingY || ParseParameterUint(line, "paddingY", paddingY);
parsedStride = parsedStride || ParseParameterUint(line, "stride", stride);
parsedActivationType = parsedActivationType || ParseParameterString(line, "activationType", activationTypeValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Convolutional layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Convolutional layer!");
ShipAssert(parsedNumFilters, "Can't parse number of filters for Convolutional layer!");
ShipAssert(parsedFilterWidth, "Can't parse filter width for Convolutional layer!");
ShipAssert(parsedFilterHeight, "Can't parse filter height for Convolutional layer!");
ShipAssert(parsedWeightsDeviation, "Can't parse weights deviation for Convolutional layer!");
ShipAssert(parsedWeightsMomentum, "Can't parse weights momentum for Convolutional layer!");
ShipAssert(parsedWeightsDecay, "Can't parse weights decay for Convolutional layer!");
ShipAssert(parsedWeightsStartingLR, "Can't parse weights starting learning rate for Convolutional layer!");
ShipAssert(parsedWeightsLRStep, "Can't parse weights learning rate step for Convolutional layer!");
ShipAssert(parsedWeightsLRFactor, "Can't parse weights learning rate factor for Convolutional layer!");
ShipAssert(parsedBiasesInitialValue, "Can't parse biases initial value for Convolutional layer!");
ShipAssert(parsedBiasesMomentum, "Can't parse biases momentum for Convolutional layer!");
ShipAssert(parsedBiasesDecay, "Can't parse biases decay for Convolutional layer!");
ShipAssert(parsedBiasesStartingLR, "Can't parse biases starting learning rate for Convolutional layer!");
ShipAssert(parsedBiasesLRStep, "Can't parse biases learning rate step for Convolutional layer!");
ShipAssert(parsedBiasesLRFactor, "Can't parse biases learning rate factor for Convolutional layer!");
ShipAssert(parsedPaddingX, "Can't parse horizontal padding for Convolutional layer!");
ShipAssert(parsedPaddingY, "Can't parse vertical padding for Convolutional layer!");
ShipAssert(parsedStride, "Can't parse stride for Convolutional layer!");
ShipAssert(parsedActivationType, "Can't parse activation type for Convolutional layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
activationType = GetActivationType(activationTypeValue);
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
ConvolutionalLayer* convLayer = new ConvolutionalLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight,
inputDataCount, holdsInputData, numFilters, filterWidth, filterHeight, inputNumChannels, m_initializeLayersParams, weightsDeviation,
m_initializeLayersParams, biasesInitialValue, weightsMomentum, weightsDecay, weightsLRStep, weightsStartingLR, weightsLRFactor,
biasesMomentum, biasesDecay, biasesLRStep, biasesStartingLR, biasesLRFactor, paddingX, paddingY, stride, activationType,
holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
convLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(convLayer);
}
outLayerTier.push_back(convLayer);
}
}
void ConfigurationParser::ParseResponseNormalizationLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint depth;
bool parsedDepth = false;
float bias;
bool parsedBias = false;
float alphaCoeff;
bool parsedAlphaCoeff = false;
float betaCoeff;
bool parsedBetaCoeff = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedDepth = parsedDepth || ParseParameterUint(line, "depth", depth);
parsedBias = parsedBias || ParseParameterFloat(line, "bias", bias);
parsedAlphaCoeff = parsedAlphaCoeff || ParseParameterFloat(line, "alphaCoeff", alphaCoeff);
parsedBetaCoeff = parsedBetaCoeff || ParseParameterFloat(line, "betaCoeff", betaCoeff);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Response Normalization layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Response Normalization layer!");
ShipAssert(parsedDepth, "Can't parse depth for Response Normalization layer!");
ShipAssert(parsedBias, "Can't parse bias for Response Normalization layer!");
ShipAssert(parsedAlphaCoeff, "Can't parse alpha coefficient for Response Normalization layer!");
ShipAssert(parsedBetaCoeff, "Can't parse beta coefficient for Response Normalization layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
ResponseNormalizationLayer* reNormLayer = new ResponseNormalizationLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount,
holdsInputData, depth, bias, alphaCoeff, betaCoeff, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
reNormLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(reNormLayer);
}
outLayerTier.push_back(reNormLayer);
}
}
void ConfigurationParser::ParseMaxPoolLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint unitWidth;
bool parsedUnitWidth = false;
uint unitHeight;
bool parsedUnitHeight = false;
uint paddingX;
bool parsedPaddingX = false;
uint paddingY;
bool parsedPaddingY = false;
uint unitStride;
bool parsedUnitStride = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedUnitWidth = parsedUnitWidth || ParseParameterUint(line, "unitWidth", unitWidth);
parsedUnitHeight = parsedUnitHeight || ParseParameterUint(line, "unitHeight", unitHeight);
parsedPaddingX = parsedPaddingX || ParseParameterUint(line, "paddingX", paddingX);
parsedPaddingY = parsedPaddingY || ParseParameterUint(line, "paddingY", paddingY);
parsedUnitStride = parsedUnitStride || ParseParameterUint(line, "unitStride", unitStride);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Max Pool layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Max Pool layer!");
ShipAssert(parsedUnitWidth, "Can't parse unit width for Max Pool layer!");
ShipAssert(parsedUnitHeight, "Can't parse unit height for Max Pool layer!");
ShipAssert(parsedPaddingX, "Can't parse padding X for Max Pool layer!");
ShipAssert(parsedPaddingY, "Can't parse padding Y for Max Pool layer!");
ShipAssert(parsedUnitStride, "Can't parse unit stride for Max Pool layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
MaxPoolLayer* maxPoolLayer = new MaxPoolLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], layerIndex, tierSize, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData,
unitWidth, unitHeight, paddingX, paddingY, unitStride, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
maxPoolLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(maxPoolLayer);
}
outLayerTier.push_back(maxPoolLayer);
}
}
void ConfigurationParser::ParseStandardLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
uint numNeurons;
bool parsedNumNeurons = false;
float weightsDeviation;
bool parsedWeightsDeviation = false;
float weightsMomentum;
bool parsedWeightsMomentum = false;
float weightsDecay;
bool parsedWeightsDecay = false;
float weightsStartingLR;
bool parsedWeightsStartingLR = false;
float weightsLRStep;
bool parsedWeightsLRStep = false;
float weightsLRFactor;
bool parsedWeightsLRFactor = false;
float biasesInitialValue;
bool parsedBiasesInitialValue = false;
float biasesMomentum;
bool parsedBiasesMomentum = false;
float biasesDecay;
bool parsedBiasesDecay = false;
float biasesStartingLR;
bool parsedBiasesStartingLR = false;
float biasesLRStep;
bool parsedBiasesLRStep = false;
float biasesLRFactor;
bool parsedBiasesLRFactor = false;
string activationTypeValue;
ActivationType activationType;
bool parsedActivationType = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedNumNeurons = parsedNumNeurons || ParseParameterUint(line, "numNeurons", numNeurons);
parsedWeightsDeviation = parsedWeightsDeviation || ParseParameterFloat(line, "weightsDeviation", weightsDeviation);
parsedWeightsMomentum = parsedWeightsMomentum || ParseParameterFloat(line, "weightsMomentum", weightsMomentum);
parsedWeightsDecay = parsedWeightsDecay || ParseParameterFloat(line, "weightsDecay", weightsDecay);
parsedWeightsStartingLR = parsedWeightsStartingLR || ParseParameterFloat(line, "weightsStartingLR", weightsStartingLR);
parsedWeightsLRStep = parsedWeightsLRStep || ParseParameterFloat(line, "weightsLRStep", weightsLRStep);
parsedWeightsLRFactor = parsedWeightsLRFactor || ParseParameterFloat(line, "weightsLRFactor", weightsLRFactor);
parsedBiasesInitialValue = parsedBiasesInitialValue || ParseParameterFloat(line, "biasesInitialValue", biasesInitialValue);
parsedBiasesMomentum = parsedBiasesMomentum || ParseParameterFloat(line, "biasesMomentum", biasesMomentum);
parsedBiasesDecay = parsedBiasesDecay || ParseParameterFloat(line, "biasesDecay", biasesDecay);
parsedBiasesStartingLR = parsedBiasesStartingLR || ParseParameterFloat(line, "biasesStartingLR", biasesStartingLR);
parsedBiasesLRStep = parsedBiasesLRStep || ParseParameterFloat(line, "biasesLRStep", biasesLRStep);
parsedBiasesLRFactor = parsedBiasesLRFactor || ParseParameterFloat(line, "biasesLRFactor", biasesLRFactor);
parsedActivationType = parsedActivationType || ParseParameterString(line, "activationType", activationTypeValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Standard layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Standard layer!");
ShipAssert(parsedNumNeurons, "Can't parse number of neurons for Standard layer!");
ShipAssert(parsedWeightsDeviation, "Can't parse weights deviation for Standard layer!");
ShipAssert(parsedWeightsMomentum, "Can't parse weights momentum for Standard layer!");
ShipAssert(parsedWeightsDecay, "Can't parse weights decay for Standard layer!");
ShipAssert(parsedWeightsStartingLR, "Can't parse weights starting learning rate for Standard layer!");
ShipAssert(parsedWeightsLRStep, "Can't parse weights learning rate step for Standard layer!");
ShipAssert(parsedWeightsLRFactor, "Can't parse weights learning rate factor for Standard layer!");
ShipAssert(parsedBiasesInitialValue, "Can't parse biases initial value for Standard layer!");
ShipAssert(parsedBiasesMomentum, "Can't parse biases momentum for Standard layer!");
ShipAssert(parsedBiasesDecay, "Can't parse biases decay for Standard layer!");
ShipAssert(parsedBiasesStartingLR, "Can't parse biases starting learning rate for Standard layer!");
ShipAssert(parsedBiasesLRStep, "Can't parse biases learning rate step for Standard layer!");
ShipAssert(parsedBiasesLRFactor, "Can't parse biases learning rate factor for Standard layer!");
ShipAssert(parsedActivationType, "Can't parse activation type for Standard layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
activationType = GetActivationType(activationTypeValue);
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
StandardLayer* standardLayer = new StandardLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], m_neuralNet->GetCublasHandles()[layerIndex], layerIndex, tierSize, inputNumChannels,
inputDataWidth, inputDataHeight, inputDataCount, holdsInputData, numNeurons, m_initializeLayersParams, weightsDeviation,
m_initializeLayersParams, biasesInitialValue, weightsMomentum, weightsDecay, weightsLRStep, weightsStartingLR, weightsLRFactor,
biasesMomentum, biasesDecay, biasesLRStep, biasesStartingLR, biasesLRFactor, activationType, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
standardLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(standardLayer);
}
outLayerTier.push_back(standardLayer);
}
}
void ConfigurationParser::ParseDropoutLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
float dropProbability;
bool parsedDropProbability = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedDropProbability = parsedDropProbability || ParseParameterFloat(line, "dropProbability", dropProbability);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for Dropout layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for Dropout layer!");
ShipAssert(parsedDropProbability, "Can't parse drop probability for Dropout layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
DropoutLayer* dropoutLayer = new DropoutLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], m_neuralNet->GetCurandStatesBuffers()[layerIndex], layerIndex, tierSize, inputNumChannels,
inputDataWidth, inputDataHeight, inputDataCount, holdsInputData, dropProbability, false, holdsActivationGradients);
for (Layer* prevLayer : prevLayers)
{
dropoutLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(dropoutLayer);
}
outLayerTier.push_back(dropoutLayer);
}
}
void ConfigurationParser::ParseSoftMaxLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
uint tierSize;
bool parsedTierSize = false;
string parallelismValue;
ParallelismMode parallelismMode;
bool parsedParallelism = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedTierSize = parsedTierSize || ParseParameterUint(line, "tierSize", tierSize);
parsedParallelism = parsedParallelism || ParseParameterString(line, "parallelism", parallelismValue);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedTierSize, "Can't parse tier size for SoftMax layer!");
ShipAssert(parsedParallelism, "Can't parse parallelism for SoftMax layer!");
if (m_parsingMode == ParsingMode::Prediction)
{
tierSize = 1;
parallelismMode = ParallelismMode::Model;
}
else
{
parallelismMode = parallelismValue == "data" ? ParallelismMode::Data : ParallelismMode::Model;
}
for (uint layerIndex = 0; layerIndex < tierSize; ++layerIndex)
{
CudaAssert(cudaSetDevice(layerIndex));
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
bool holdsActivationGradients = ShouldHoldActivationGradients(parallelismMode, tierSize, tierIndex, layerIndex);
SoftMaxLayer* softMaxLayer = new SoftMaxLayer(parallelismMode, m_neuralNet->GetDeviceCalculationStreams()[layerIndex],
m_neuralNet->GetDeviceMemoryStreams()[layerIndex], inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, holdsInputData);
for (Layer* prevLayer : prevLayers)
{
softMaxLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(softMaxLayer);
}
outLayerTier.push_back(softMaxLayer);
}
}
void ConfigurationParser::ParseOutputLayerTier(size_t tierIndex, vector<Layer*>& outLayerTier)
{
string lossFunctionName;
LossFunctionType lossFunction;
bool parsedLossFunction = false;
uint numGuesses;
bool parsedNumGuesses = false;
string prevLayersParam;
bool parsedPrevLayers = false;
vector<string>& tierLines = m_tiersLines[tierIndex];
for (string& line : tierLines)
{
parsedLossFunction = parsedLossFunction || ParseParameterString(line, "lossFunction", lossFunctionName);
parsedNumGuesses = parsedNumGuesses || ParseParameterUint(line, "numGuesses", numGuesses);
parsedPrevLayers = parsedPrevLayers || ParseParameterString(line, "prevLayers", prevLayersParam);
}
ShipAssert(parsedLossFunction, "Can't parse loss function for Output layer!");
lossFunction = GetLossFunctionType(lossFunctionName);
uint tierSize = 1;
uint layerIndex = 0;
ParallelismMode parallelismMode = ParallelismMode::Model;
vector<Layer*> prevLayers = FindPrevLayers(parallelismMode, layerIndex, tierSize, tierIndex - 1, prevLayersParam);
uint inputNumChannels;
uint inputDataWidth;
uint inputDataHeight;
uint inputDataCount;
bool holdsInputData;
FindInputParams(prevLayers, layerIndex, tierSize, parallelismMode, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, holdsInputData);
CudaAssert(cudaSetDevice(0));
InputLayer* inputLayer = static_cast<InputLayer*>(m_layersTiers[0][0]);
OutputLayer* outputLayer = new OutputLayer(m_neuralNet->GetDeviceCalculationStreams()[0], m_neuralNet->GetDeviceMemoryStreams()[0],
inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, (uint)m_maxNetworkTierSize * inputDataCount, lossFunction, parsedNumGuesses, numGuesses,
inputLayer->GetNumTestPasses());
for (Layer* prevLayer : prevLayers)
{
outputLayer->AddPrevLayer(prevLayer);
prevLayer->AddNextLayer(outputLayer);
}
outLayerTier.push_back(outputLayer);
}
vector<Layer*> ConfigurationParser::ParseLayersTier(size_t tierIndex, LayerType tierLayerType)
{
vector<Layer*> layerTier;
if (tierLayerType == LayerType::Input)
{
ParseInputLayerTier(layerTier);
}
else if (tierLayerType == LayerType::Convolutional)
{
ParseConvolutionalLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::ResponseNormalization)
{
ParseResponseNormalizationLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::MaxPool)
{
ParseMaxPoolLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Standard)
{
ParseStandardLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Dropout)
{
ParseDropoutLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::SoftMax)
{
ParseSoftMaxLayerTier(tierIndex, layerTier);
}
else if (tierLayerType == LayerType::Output)
{
ParseOutputLayerTier(tierIndex, layerTier);
}
return layerTier;
}
|
6a80eab8d54b9b90aa16cd7a42d0af66d3abafbf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <hip/hip_runtime.h>
#include <iostream>
#include <opencv2/core/cuda_stream_accessor.hpp>
#include <opencv2/opencv.hpp>
#include <sstream>
namespace perceive
{
namespace cuda
{
struct Color
{
float elements[3];
__device__ Color(float a, float b, float c)
: elements{a, b, c}
{}
__device__ float& operator[](size_t n) { return elements[n]; }
__device__ const float& operator[](size_t n) const { return elements[n]; }
};
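// Linearizes sRGB components (inverse gamma companding) and converts them to CIE XYZ
// using the standard sRGB -> XYZ matrix for the D65 reference white.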
__device__ static inline Color rgb_to_xyz(const Color& in) noexcept
{
const auto R = in[0], G = in[1], B = in[2];
const auto r = (R <= 0.04045) ? R / 12.92 : pow((R + 0.055) / 1.055, 2.4);
const auto g = (G <= 0.04045) ? G / 12.92 : pow((G + 0.055) / 1.055, 2.4);
const auto b = (B <= 0.04045) ? B / 12.92 : pow((B + 0.055) / 1.055, 2.4);
return Color(r * 0.4124564 + g * 0.3575761 + b * 0.1804375,
r * 0.2126729 + g * 0.7151522 + b * 0.0721750,
r * 0.0193339 + g * 0.1191920 + b * 0.9503041);
}
__device__ static inline Color xyz_to_lab(Color XYZ) noexcept
{
const auto X = XYZ[0], Y = XYZ[1], Z = XYZ[2];
//------------------------
// XYZ to LAB conversion
//------------------------
constexpr double epsilon = 0.008856; // actual CIE standard
constexpr double kappa = 7.787; // actual CIE standard
constexpr double Xr = 0.950456; // reference white
constexpr double Yr = 1.0; // reference white
constexpr double Zr = 1.088754; // reference white
const double xr = X / Xr;
const double yr = Y / Yr;
const double zr = Z / Zr;
const auto fx
= (xr > epsilon) ? std::cbrt(xr) : (kappa * xr + 16.0) / 116.0;
const auto fy
= (yr > epsilon) ? std::cbrt(yr) : (kappa * yr + 16.0) / 116.0;
const auto fz
= (zr > epsilon) ? std::cbrt(zr) : (kappa * zr + 16.0) / 116.0;
return Color(116.0 * fy - 16.0, 500.0 * (fx - fy), 200.0 * (fy - fz));
}
__device__ static Color rgb_to_lab(const Color& in) noexcept
{
return xyz_to_lab(rgb_to_xyz(in));
}
__global__ void image_rgb_to_lab(const uint32_t* in_pixel_start,
const uint32_t* in_pixel_end,
float* out_pixel_start,
size_t out_pixel_size)
{
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
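// Grid-stride loop: each thread starts at its global index and advances by the total number
// of launched threads, so any number of pixels is covered by this launch configuration.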
auto* lab_pixel_bytes = reinterpret_cast<unsigned char*>(out_pixel_start)
+ (out_pixel_size * offset);
for(auto* rgb_pixel = in_pixel_start + offset; rgb_pixel < in_pixel_end;
rgb_pixel += stride) {
Color rgb_color{((*rgb_pixel >> 16) & 0xff) / 255.0f,
((*rgb_pixel >> 8) & 0xff) / 255.0f,
((*rgb_pixel >> 0) & 0xff) / 255.0f};
Color lab{rgb_to_lab(rgb_color)};
auto lab_pixel = reinterpret_cast<float*>(lab_pixel_bytes);
for(int i = 0; i < 3; ++i) { lab_pixel[i] = lab[i]; }
lab_pixel_bytes += out_pixel_size * stride;
}
}
void cuda_convert_rgb_to_lab(
const uint32_t* rgb_pixel_start,
const uint32_t* rgb_pixel_end,
float* lab_pixel_start, // pointer to 'L' value of first pixel. L A and
// B values are assumed to be contiguous
size_t lab_pixel_size, // bytes from one pixel to next
cv::cuda::Stream& stream)
{
hipStream_t cuda_stream{cv::cuda::StreamAccessor::getStream(stream)};
dim3 blockSize{256};
dim3 gridSize{256};
auto device_ptr = [](auto* host_ptr) {
decltype(host_ptr) dev_ptr;
auto err
= hipHostGetDevicePointer((void**) &dev_ptr, (void*) host_ptr, 0);
if(err != hipSuccess) {
std::stringstream buf;
buf << "Cuda error converting host->device pointer "
<< ": " << hipGetErrorString(err);
throw std::runtime_error(buf.str());
}
return dev_ptr;
};
const uint32_t* device_rgb_start{device_ptr(rgb_pixel_start)};
const uint32_t* device_rgb_end{device_rgb_start
+ (rgb_pixel_end - rgb_pixel_start)};
float* device_lab_start{device_ptr(lab_pixel_start)};
hipLaunchKernelGGL(( image_rgb_to_lab), dim3(blockSize), dim3(gridSize), 0, cuda_stream,
device_rgb_start, device_rgb_end, device_lab_start, lab_pixel_size);
hipError_t err = hipPeekAtLastError();
if(err != hipSuccess) {
std::stringstream buf;
buf << "Cuda error at " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(err);
throw std::runtime_error(buf.str());
}
}
} // namespace cuda
} // namespace perceive
|
6a80eab8d54b9b90aa16cd7a42d0af66d3abafbf.cu
|
#include <cmath>
#include <cuda_runtime.h>
#include <iostream>
#include <opencv2/core/cuda_stream_accessor.hpp>
#include <opencv2/opencv.hpp>
#include <sstream>
namespace perceive
{
namespace cuda
{
struct Color
{
float elements[3];
__device__ Color(float a, float b, float c)
: elements{a, b, c}
{}
__device__ float& operator[](size_t n) { return elements[n]; }
__device__ const float& operator[](size_t n) const { return elements[n]; }
};
__device__ static inline Color rgb_to_xyz(const Color& in) noexcept
{
const auto R = in[0], G = in[1], B = in[2];
const auto r = (R <= 0.04045) ? R / 12.92 : pow((R + 0.055) / 1.055, 2.4);
const auto g = (G <= 0.04045) ? G / 12.92 : pow((G + 0.055) / 1.055, 2.4);
const auto b = (B <= 0.04045) ? B / 12.92 : pow((B + 0.055) / 1.055, 2.4);
return Color(r * 0.4124564 + g * 0.3575761 + b * 0.1804375,
r * 0.2126729 + g * 0.7151522 + b * 0.0721750,
r * 0.0193339 + g * 0.1191920 + b * 0.9503041);
}
__device__ static inline Color xyz_to_lab(Color XYZ) noexcept
{
const auto X = XYZ[0], Y = XYZ[1], Z = XYZ[2];
//------------------------
// XYZ to LAB conversion
//------------------------
constexpr double epsilon = 0.008856; // actual CIE standard
constexpr double kappa = 7.787; // actual CIE standard
constexpr double Xr = 0.950456; // reference white
constexpr double Yr = 1.0; // reference white
constexpr double Zr = 1.088754; // reference white
const double xr = X / Xr;
const double yr = Y / Yr;
const double zr = Z / Zr;
const auto fx
= (xr > epsilon) ? std::cbrt(xr) : (kappa * xr + 16.0) / 116.0;
const auto fy
= (yr > epsilon) ? std::cbrt(yr) : (kappa * yr + 16.0) / 116.0;
const auto fz
= (zr > epsilon) ? std::cbrt(zr) : (kappa * zr + 16.0) / 116.0;
return Color(116.0 * fy - 16.0, 500.0 * (fx - fy), 200.0 * (fy - fz));
}
__device__ static Color rgb_to_lab(const Color& in) noexcept
{
return xyz_to_lab(rgb_to_xyz(in));
}
__global__ void image_rgb_to_lab(const uint32_t* in_pixel_start,
const uint32_t* in_pixel_end,
float* out_pixel_start,
size_t out_pixel_size)
{
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
auto* lab_pixel_bytes = reinterpret_cast<unsigned char*>(out_pixel_start)
+ (out_pixel_size * offset);
for(auto* rgb_pixel = in_pixel_start + offset; rgb_pixel < in_pixel_end;
rgb_pixel += stride) {
Color rgb_color{((*rgb_pixel >> 16) & 0xff) / 255.0f,
((*rgb_pixel >> 8) & 0xff) / 255.0f,
((*rgb_pixel >> 0) & 0xff) / 255.0f};
Color lab{rgb_to_lab(rgb_color)};
auto lab_pixel = reinterpret_cast<float*>(lab_pixel_bytes);
for(int i = 0; i < 3; ++i) { lab_pixel[i] = lab[i]; }
lab_pixel_bytes += out_pixel_size * stride;
}
}
void cuda_convert_rgb_to_lab(
const uint32_t* rgb_pixel_start,
const uint32_t* rgb_pixel_end,
float* lab_pixel_start, // pointer to 'L' value of first pixel. L A and
// B values are assumed to be contiguous
size_t lab_pixel_size, // bytes from one pixel to next
cv::cuda::Stream& stream)
{
cudaStream_t cuda_stream{cv::cuda::StreamAccessor::getStream(stream)};
dim3 blockSize{256};
dim3 gridSize{256};
auto device_ptr = [](auto* host_ptr) {
decltype(host_ptr) dev_ptr;
auto err
= cudaHostGetDevicePointer((void**) &dev_ptr, (void*) host_ptr, 0);
if(err != cudaSuccess) {
std::stringstream buf;
buf << "Cuda error converting host->device pointer "
<< ": " << cudaGetErrorString(err);
throw std::runtime_error(buf.str());
}
return dev_ptr;
};
const uint32_t* device_rgb_start{device_ptr(rgb_pixel_start)};
const uint32_t* device_rgb_end{device_rgb_start
+ (rgb_pixel_end - rgb_pixel_start)};
float* device_lab_start{device_ptr(lab_pixel_start)};
image_rgb_to_lab<<<blockSize, gridSize, 0, cuda_stream>>>(
device_rgb_start, device_rgb_end, device_lab_start, lab_pixel_size);
cudaError_t err = cudaPeekAtLastError();
if(err != cudaSuccess) {
std::stringstream buf;
buf << "Cuda error at " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(err);
throw std::runtime_error(buf.str());
}
}
} // namespace cuda
} // namespace perceive
|
621466e39d2bdf91a4695a1afbe2de07bf9501c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ExpProbPolynomProbsImpl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *features = NULL;
hipMalloc(&features, XSIZE*YSIZE);
int batchSize = XSIZE*YSIZE;
const int *splits = NULL;
hipMalloc(&splits, XSIZE*YSIZE);
const float *conditions = NULL;
hipMalloc(&conditions, XSIZE*YSIZE);
const int *polynomOffsets = NULL;
hipMalloc(&polynomOffsets, XSIZE*YSIZE);
int polynomCount = 1;
float lambda = 1;
float *probs = NULL;
hipMalloc(&probs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ExpProbPolynomProbsImpl), dim3(gridBlock),dim3(threadBlock), 0, 0, features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ExpProbPolynomProbsImpl), dim3(gridBlock),dim3(threadBlock), 0, 0, features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ExpProbPolynomProbsImpl), dim3(gridBlock),dim3(threadBlock), 0, 0, features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
621466e39d2bdf91a4695a1afbe2de07bf9501c9.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ExpProbPolynomProbsImpl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *features = NULL;
cudaMalloc(&features, XSIZE*YSIZE);
int batchSize = XSIZE*YSIZE;
const int *splits = NULL;
cudaMalloc(&splits, XSIZE*YSIZE);
const float *conditions = NULL;
cudaMalloc(&conditions, XSIZE*YSIZE);
const int *polynomOffsets = NULL;
cudaMalloc(&polynomOffsets, XSIZE*YSIZE);
int polynomCount = 1;
float lambda = 1;
float *probs = NULL;
cudaMalloc(&probs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ExpProbPolynomProbsImpl<<<gridBlock,threadBlock>>>(features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ExpProbPolynomProbsImpl<<<gridBlock,threadBlock>>>(features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ExpProbPolynomProbsImpl<<<gridBlock,threadBlock>>>(features,batchSize,splits,conditions,polynomOffsets,polynomCount,lambda,probs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
335bb26cab89ed911d568dbb621eaba0afddd5e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "flo/device/adjacency_matrix_indices.cuh"
#include "flo/device/thread_util.cuh"
#include <thrust/find.h>
#include <cusp/blas/blas.h>
#include <thrust/iterator/discard_iterator.h>
FLO_DEVICE_NAMESPACE_BEGIN
namespace
{
__global__ void
d_adjacency_matrix_indices(const int* __restrict__ di_faces,
const int* __restrict__ di_vertex_adjacency,
const int* __restrict__ di_cumulative_valence,
const int i_nfaces,
int* __restrict__ do_indices)
{
const int fid = blockIdx.x * blockDim.x + threadIdx.x;
// Check we're not out of range
if (fid >= i_nfaces)
return;
// Determine whether we are calculating a column or row major offset
// even threads are col major while odd ones are row major
const uint8_t major = threadIdx.y >= 3;
const uchar3 loop = tri_edge_loop(threadIdx.y - 3 * major);
// Global vertex indices that make this edge
const int2 edge =
make_int2(di_faces[i_nfaces * nth_element(loop, major) + fid],
di_faces[i_nfaces * nth_element(loop, !major) + fid]);
int begin = di_cumulative_valence[edge.x];
int end = di_cumulative_valence[edge.x + 1] - 1;
auto iter = thrust::lower_bound(thrust::seq,
di_vertex_adjacency + begin,
di_vertex_adjacency + end,
edge.y);
const int index = (iter - di_vertex_adjacency) + edge.x + (edge.y > edge.x);
do_indices[i_nfaces * threadIdx.y + fid] = index;
}
} // namespace
FLO_API void adjacency_matrix_indices(
cusp::array2d<int, cusp::device_memory>::const_view di_faces,
cusp::array1d<int, cusp::device_memory>::const_view di_adjacency_keys,
cusp::array1d<int, cusp::device_memory>::const_view di_adjacency,
cusp::array1d<int, cusp::device_memory>::const_view di_cumulative_valence,
cusp::array2d<int, cusp::device_memory>::view do_entry_indices,
cusp::array1d<int, cusp::device_memory>::view do_diagonal_indices,
cusp::array1d<int, cusp::device_memory>::view do_row_indices,
cusp::array1d<int, cusp::device_memory>::view do_column_indices,
thrust::device_ptr<void> dio_temp)
{
// Find the diagonal matrix entry indices
find_diagonal_indices(
di_cumulative_valence, di_adjacency_keys, di_adjacency, do_diagonal_indices);
const int ndiagonals = do_diagonal_indices.size();
const int nnon_diagonals = do_column_indices.size() - ndiagonals;
// This will be used to permute the value iterator
thrust::device_ptr<int> diagonal_stride_ptr{
reinterpret_cast<int*>(dio_temp.get())};
auto diagonal_stride = cusp::make_array1d_view(
diagonal_stride_ptr, diagonal_stride_ptr + nnon_diagonals);
make_skip_indices(do_diagonal_indices, diagonal_stride);
// An iterator for each row, column pair of indices
auto entry_it = thrust::make_zip_iterator(
thrust::make_tuple(do_row_indices.begin(),
do_column_indices.begin()));
// Iterator for non-diagonal matrix entries
auto non_diag_begin =
thrust::make_permutation_iterator(entry_it, diagonal_stride.begin());
// Copy the adjacency keys and the adjacency info as the matrix coords
thrust::copy_n(thrust::make_zip_iterator(thrust::make_tuple(
di_adjacency_keys.begin(), di_adjacency.begin())),
nnon_diagonals,
non_diag_begin);
// Iterator for diagonal matrix entries
auto diag_begin =
thrust::make_permutation_iterator(entry_it, do_diagonal_indices.begin());
// Generate the diagonal entry, row and column indices
thrust::tabulate(
diag_begin, diag_begin + do_diagonal_indices.size(),
[] __device__(const int i) {
return thrust::make_tuple(i, i);
});
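// Launch one thread per (face, entry) pair: blockDim.y == 6 covers the three edges of a face,
// each emitted once in row-major and once in column-major orientation.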
dim3 block_dim;
block_dim.y = 6;
block_dim.x = 170;
const int nblocks =
di_faces.num_cols * 6 / (block_dim.x * block_dim.y * block_dim.z) + 1;
hipLaunchKernelGGL(( d_adjacency_matrix_indices), dim3(nblocks), dim3(block_dim), 0, 0,
di_faces.values.begin().base().get(),
di_adjacency.begin().base().get(),
di_cumulative_valence.begin().base().get(),
di_faces.num_cols,
do_entry_indices.values.begin().base().get());
hipDeviceSynchronize();
}
FLO_API void find_diagonal_indices(
cusp::array1d<int, cusp::device_memory>::const_view di_row_offsets,
cusp::array1d<int, cusp::device_memory>::const_view di_row_indices,
cusp::array1d<int, cusp::device_memory>::const_view di_column_indices,
cusp::array1d<int, cusp::device_memory>::view do_diagonals)
{
// Iterates over the matrix entry coordinates, and returns whether the row index is
// greater than the column index, i.e. the entry precedes the diagonal in its row.
const auto cmp_less_it = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(di_row_indices.begin(), di_column_indices.begin())),
[] __host__ __device__(const thrust::tuple<int, int>& coord) -> int {
return coord.get<0>() > coord.get<1>();
});
// Then reduce by row key to count how many entries in each row come before
// the diagonal entry
thrust::reduce_by_key(di_row_indices.begin(),
di_row_indices.end(),
cmp_less_it,
thrust::make_discard_iterator(),
do_diagonals.begin());
// Sum in the cumulative valence and a count to finalize the diagonal indices
cusp::blas::axpbypcz(do_diagonals,
di_row_offsets.subarray(0, do_diagonals.size()),
cusp::counting_array<int>(do_diagonals.size()),
do_diagonals,
1,
1,
1);
}
FLO_API void make_skip_indices(
cusp::array1d<int, cusp::device_memory>::const_view di_skip_keys,
cusp::array1d<int, cusp::device_memory>::view do_iterator_indices)
{
// Start with zeros
thrust::fill(do_iterator_indices.begin(), do_iterator_indices.end(), 0);
// Add ones in locations where a diagonal exists, need atomic due to neighbours
thrust::for_each_n(
thrust::counting_iterator<int>(0),
di_skip_keys.size(),
[skip_keys = di_skip_keys.begin().base().get(),
out = do_iterator_indices.begin().base().get()] __device__(int x) {
const int skip = skip_keys[x] - x;
atomicAdd(out + skip, 1);
});
// Scan the diagonal markers to produce an offset
thrust::inclusive_scan(do_iterator_indices.begin(),
do_iterator_indices.end(),
do_iterator_indices.begin());
// Add the original entry indices to our offset array
thrust::transform(do_iterator_indices.begin(),
do_iterator_indices.end(),
thrust::counting_iterator<int>(0),
do_iterator_indices.begin(),
thrust::plus<int>());
}
FLO_DEVICE_NAMESPACE_END
|
335bb26cab89ed911d568dbb621eaba0afddd5e5.cu
|
#include "flo/device/adjacency_matrix_indices.cuh"
#include "flo/device/thread_util.cuh"
#include <thrust/find.h>
#include <cusp/blas/blas.h>
#include <thrust/iterator/discard_iterator.h>
FLO_DEVICE_NAMESPACE_BEGIN
namespace
{
__global__ void
d_adjacency_matrix_indices(const int* __restrict__ di_faces,
const int* __restrict__ di_vertex_adjacency,
const int* __restrict__ di_cumulative_valence,
const int i_nfaces,
int* __restrict__ do_indices)
{
const int fid = blockIdx.x * blockDim.x + threadIdx.x;
// Check we're not out of range
if (fid >= i_nfaces)
return;
// Determine whether we are calculating a column or row major offset
// even threads are col major while odd ones are row major
const uint8_t major = threadIdx.y >= 3;
const uchar3 loop = tri_edge_loop(threadIdx.y - 3 * major);
// Global vertex indices that make this edge
const int2 edge =
make_int2(di_faces[i_nfaces * nth_element(loop, major) + fid],
di_faces[i_nfaces * nth_element(loop, !major) + fid]);
int begin = di_cumulative_valence[edge.x];
int end = di_cumulative_valence[edge.x + 1] - 1;
auto iter = thrust::lower_bound(thrust::seq,
di_vertex_adjacency + begin,
di_vertex_adjacency + end,
edge.y);
const int index = (iter - di_vertex_adjacency) + edge.x + (edge.y > edge.x);
do_indices[i_nfaces * threadIdx.y + fid] = index;
}
} // namespace
FLO_API void adjacency_matrix_indices(
cusp::array2d<int, cusp::device_memory>::const_view di_faces,
cusp::array1d<int, cusp::device_memory>::const_view di_adjacency_keys,
cusp::array1d<int, cusp::device_memory>::const_view di_adjacency,
cusp::array1d<int, cusp::device_memory>::const_view di_cumulative_valence,
cusp::array2d<int, cusp::device_memory>::view do_entry_indices,
cusp::array1d<int, cusp::device_memory>::view do_diagonal_indices,
cusp::array1d<int, cusp::device_memory>::view do_row_indices,
cusp::array1d<int, cusp::device_memory>::view do_column_indices,
thrust::device_ptr<void> dio_temp)
{
// Find the diagonal matrix entry indices
find_diagonal_indices(
di_cumulative_valence, di_adjacency_keys, di_adjacency, do_diagonal_indices);
const int ndiagonals = do_diagonal_indices.size();
const int nnon_diagonals = do_column_indices.size() - ndiagonals;
// This will be used to permute the value iterator
thrust::device_ptr<int> diagonal_stride_ptr{
reinterpret_cast<int*>(dio_temp.get())};
auto diagonal_stride = cusp::make_array1d_view(
diagonal_stride_ptr, diagonal_stride_ptr + nnon_diagonals);
make_skip_indices(do_diagonal_indices, diagonal_stride);
// An iterator for each row, column pair of indices
auto entry_it = thrust::make_zip_iterator(
thrust::make_tuple(do_row_indices.begin(),
do_column_indices.begin()));
// Iterator for non-diagonal matrix entries
auto non_diag_begin =
thrust::make_permutation_iterator(entry_it, diagonal_stride.begin());
// Copy the adjacency keys and the adjacency info as the matrix coords
thrust::copy_n(thrust::make_zip_iterator(thrust::make_tuple(
di_adjacency_keys.begin(), di_adjacency.begin())),
nnon_diagonals,
non_diag_begin);
// Iterator for diagonal matrix entries
auto diag_begin =
thrust::make_permutation_iterator(entry_it, do_diagonal_indices.begin());
// Generate the diagonal entry, row and column indices
thrust::tabulate(
diag_begin, diag_begin + do_diagonal_indices.size(),
[] __device__(const int i) {
return thrust::make_tuple(i, i);
});
dim3 block_dim;
block_dim.y = 6;
block_dim.x = 170;
const int nblocks =
di_faces.num_cols * 6 / (block_dim.x * block_dim.y * block_dim.z) + 1;
d_adjacency_matrix_indices<<<nblocks, block_dim>>>(
di_faces.values.begin().base().get(),
di_adjacency.begin().base().get(),
di_cumulative_valence.begin().base().get(),
di_faces.num_cols,
do_entry_indices.values.begin().base().get());
cudaDeviceSynchronize();
}
FLO_API void find_diagonal_indices(
cusp::array1d<int, cusp::device_memory>::const_view di_row_offsets,
cusp::array1d<int, cusp::device_memory>::const_view di_row_indices,
cusp::array1d<int, cusp::device_memory>::const_view di_column_indices,
cusp::array1d<int, cusp::device_memory>::view do_diagonals)
{
// Iterates over the matrix entry coordinates, and returns whether the row index is
// greater than the column index, i.e. the entry precedes the diagonal in its row.
const auto cmp_less_it = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(di_row_indices.begin(), di_column_indices.begin())),
[] __host__ __device__(const thrust::tuple<int, int>& coord) -> int {
return coord.get<0>() > coord.get<1>();
});
// Then reduce by row key to count how many entries in each row come before
// the diagonal entry
thrust::reduce_by_key(di_row_indices.begin(),
di_row_indices.end(),
cmp_less_it,
thrust::make_discard_iterator(),
do_diagonals.begin());
// Sum in the cumulative valence and a count to finalize the diagonal indices
cusp::blas::axpbypcz(do_diagonals,
di_row_offsets.subarray(0, do_diagonals.size()),
cusp::counting_array<int>(do_diagonals.size()),
do_diagonals,
1,
1,
1);
}
FLO_API void make_skip_indices(
cusp::array1d<int, cusp::device_memory>::const_view di_skip_keys,
cusp::array1d<int, cusp::device_memory>::view do_iterator_indices)
{
// Start with zeros
thrust::fill(do_iterator_indices.begin(), do_iterator_indices.end(), 0);
// Add ones in locations where a diagonal exists, need atomic due to neighbours
thrust::for_each_n(
thrust::counting_iterator<int>(0),
di_skip_keys.size(),
[skip_keys = di_skip_keys.begin().base().get(),
out = do_iterator_indices.begin().base().get()] __device__(int x) {
const int skip = skip_keys[x] - x;
atomicAdd(out + skip, 1);
});
// Scan the diagonal markers to produce an offset
thrust::inclusive_scan(do_iterator_indices.begin(),
do_iterator_indices.end(),
do_iterator_indices.begin());
// Add the original entry indices to our offset array
thrust::transform(do_iterator_indices.begin(),
do_iterator_indices.end(),
thrust::counting_iterator<int>(0),
do_iterator_indices.begin(),
thrust::plus<int>());
}
FLO_DEVICE_NAMESPACE_END
|
4e2ec8fd655e3679adf233f11700663a0dadc7f1.hip
|
// !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////
// File: primaryFunctions.cu //
///////////////////////////////////////////////
#include <iostream>
#include <hiprand/hiprand.h>
#include <cusolverDn.h>
#include "declarations.h"
#include "rocblas.h"
#include "kernel.h"
#include "cuLibs.h"
#include "reduction.h"
#include "errorTypes.h"
#include "Globals.h"
#include "mem.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////
/* Global variables */
extern int *devInfo;
extern hipblasHandle_t handle;
extern hiprandGenerator_t generator;
extern hipsolverDnHandle_t cusolverH;
extern datatype *I, *KK, *KK2, *KVOXELS, *KVOXELS2, *TIMECOMPK, *TIMECOMPK2, *Kv, *TIMECOMPVOXELS, *X, *D, *S, *W, *d_W;
///////////////////////////////////////////////////////////////////////////////
/* Computes the error of the approximation */
datatype ErrorComp() {
gpu_blas_mmul(handle, D, S, TIMECOMPVOXELS, 0, Globals::rowsX, K, K, Globals::colsX); // D*S
subtractMatrices << <(Globals::rowsX*Globals::colsX) / 1024 + 1, 1024 >> > (TIMECOMPVOXELS, X, TIMECOMPVOXELS, Globals::rowsX*Globals::colsX); // Y - D*S
return sqrt(frobeniusNorm(handle, TIMECOMPVOXELS, Globals::rowsX*Globals::colsX) / (Globals::rowsX*Globals::colsX)); // sqrt(norm(Y - D*S, 'fro') / (TIMECOMP*VOXELS))
}
/* Calculates the new coefficients according to the MM algorithm */
void NewCoef(datatype lamb) {
hipError_t err;
datatype cS, Err, bound;
int t, Ts, Es;
if (ES != -1) {
Ts = 500;
Es = ES;
}
else {
Ts = TS;
Es = 0;
}
gpu_blas_mmul(handle, D, D, KK, 1, Globals::rowsX, K, Globals::rowsX, K); // Dux = D'*D;
gpu_blas_mmul(handle, KK, KK, KK2, 1, K, K, K, K); // Dux.'*Dux
cS = findMaxSqrtEigenvalue(cusolverH, KK2, W, d_W, devInfo, K); // cS = max(sqrt(eig(Dux.'*Dux)))
gpu_blas_mmul(handle, D, X, KVOXELS, 1, Globals::rowsX, K, Globals::rowsX, Globals::colsX); // D'*Y
matrixDivNum << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS, K*Globals::colsX, cS); // DY = D'*Y/cS
matrixDivNum << < (K*K) / 1024 + 1, 1024 >> > (KK, K*K, cS); // Dux/cS
subtractMatrices << <(K*K) / 1024 + 1, 1024 >> > (KK2, I, KK, K*K); //Aq = I - Dux / cS;
Err = 1.0;
t = 1;
bound = (datatype)0.5*lamb / cS;
while (t <= Ts && Err>Es) {
gpu_blas_mmul(handle, KK2, S, KVOXELS2, 0, K, K, K, Globals::colsX); // Aq*S
addMatrices << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS2, KVOXELS, KVOXELS2, K*Globals::colsX); // A = DY+Aq*S
wthresh << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS2, K*Globals::colsX, bound); // A = wthresh(A, 's', 0.5*lam / cS)
if ((err = hipMemcpy(S, KVOXELS2, K * Globals::colsX * sizeof(datatype), hipMemcpyDeviceToDevice)) != hipSuccess) {
printf("hipMemcpy S failed: %s\n", hipGetErrorString(err));
cleanup();
exit(-1);
} // S=A
t = t + 1;
}
}
/* Calculates the new Dictionary atoms according to the MM algorithm */
void NewDict() {
int t, Ed, Td;
hipError_t err;
datatype cD, Err;
if (ED != -1) {
Ed = ED;
Td = 100;
}
else {
Td = TD;
Ed = 0;
}
gpu_blas_mmul(handle, S, S, KK, 2, K, Globals::colsX, K, Globals::colsX); // Sux = S*S'
gpu_blas_mmul(handle, KK, KK, KK2, 1, K, K, K, K); // Sux.'*Sux
cD = findMaxSqrtEigenvalue(cusolverH, KK2, W, d_W, devInfo, K); // cD = max(sqrt(eig(Sux.'*Sux)))
gpu_blas_mmul(handle, X, S, TIMECOMPK, 2, Globals::rowsX, Globals::colsX, K, Globals::colsX); // Y*S'
matrixDivNum << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK, Globals::rowsX*K, cD); // YS = Y*S'/cD
matrixDivNum << <(K*K) / 1024 + 1, 1024 >> > (KK, K*K, cD); // Sux/cD
subtractMatrices << <(K*K) / 1024 + 1, 1024 >> > (KK2, I, KK, K*K); // Bq = I-Sux/cD
Err = 1.0;
t = 1;
while (t <= Td && Err > Ed) {
gpu_blas_mmul(handle, D, KK2, TIMECOMPK2, 0, Globals::rowsX, K, K, K); // D*Bq
addMatrices << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK2, TIMECOMPK, TIMECOMPK2, Globals::rowsX*K); // B = YS + D*Bq
normalizeReduction(TIMECOMPK2, Kv); // Kv = sqrt(sum(B.^2)); Kv(Kv < ccl) = 1;
rdivide << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK2, Globals::rowsX, K, Kv); //B = bsxfun(@rdivide,B,Kv)
if ((err = hipMemcpy(D, TIMECOMPK2, Globals::rowsX * K * sizeof(datatype), hipMemcpyDeviceToDevice)) != hipSuccess) {
printf("hipMemcpy failed D: %s\n", hipGetErrorString(err));
cleanup();
exit(-1);
} //D = B
t = t + 1;
}
}
/* Majorized minimization */
void mom() {
int i;
datatype lamb;
lamb = (datatype)LAMBDA*sqrt(frobeniusNorm(handle, X, Globals::rowsX*Globals::colsX) / (Globals::rowsX*Globals::colsX)); //lamb = lamb*sqrt(norm(Y,'fro')/(T*N));
randn(generator, D, Globals::rowsX * K); // D = randn(T,K)
randn(generator, S, K * Globals::colsX); // S = randn(K,N)
dim3 grid(1, 1);
dim3 threads(K, K);
initIdentityGPU << <grid, threads >> > (I, K, K);
printf("Initial error\t: %f\n\n", ErrorComp());
for (i = 0; i < ITER; i++) {
NewCoef(lamb);
NewDict();
printf("Iteration %d\t: %f \n", i + 1, ErrorComp());
}
printf("\nFinal error\t: %f \n", ErrorComp());
}
|
4e2ec8fd655e3679adf233f11700663a0dadc7f1.cu
|
///////////////////////////////////////////////
// File: primaryFunctions.cu //
///////////////////////////////////////////////
#include <iostream>
#include <curand.h>
#include <cusolverDn.h>
#include "declarations.h"
#include "cublas_v2.h"
#include "kernel.h"
#include "cuLibs.h"
#include "reduction.h"
#include "errorTypes.h"
#include "Globals.h"
#include "mem.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////
/* Global variables */
extern int *devInfo;
extern cublasHandle_t handle;
extern curandGenerator_t generator;
extern cusolverDnHandle_t cusolverH;
extern datatype *I, *KK, *KK2, *KVOXELS, *KVOXELS2, *TIMECOMPK, *TIMECOMPK2, *Kv, *TIMECOMPVOXELS, *X, *D, *S, *W, *d_W;
///////////////////////////////////////////////////////////////////////////////
/* Computes the error of the approximation */
datatype ErrorComp() {
gpu_blas_mmul(handle, D, S, TIMECOMPVOXELS, 0, Globals::rowsX, K, K, Globals::colsX); // D*S
subtractMatrices << <(Globals::rowsX*Globals::colsX) / 1024 + 1, 1024 >> > (TIMECOMPVOXELS, X, TIMECOMPVOXELS, Globals::rowsX*Globals::colsX); // Y - D*S
return sqrt(frobeniusNorm(handle, TIMECOMPVOXELS, Globals::rowsX*Globals::colsX) / (Globals::rowsX*Globals::colsX)); // sqrt(norm(Y - D*S, 'fro') / (TIMECOMP*VOXELS))
}
/* Calculates the new coefficients according to the MM algorithm */
void NewCoef(datatype lamb) {
cudaError_t err;
datatype cS, Err, bound;
int t, Ts, Es;
if (ES != -1) {
Ts = 500;
Es = ES;
}
else {
Ts = TS;
Es = 0;
}
gpu_blas_mmul(handle, D, D, KK, 1, Globals::rowsX, K, Globals::rowsX, K); // Dux = D'*D;
gpu_blas_mmul(handle, KK, KK, KK2, 1, K, K, K, K); // Dux.'*Dux
cS = findMaxSqrtEigenvalue(cusolverH, KK2, W, d_W, devInfo, K); // cS = max(sqrt(eig(Dux.'*Dux)))
gpu_blas_mmul(handle, D, X, KVOXELS, 1, Globals::rowsX, K, Globals::rowsX, Globals::colsX); // D'*Y
matrixDivNum << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS, K*Globals::colsX, cS); // DY = D'*Y/cS
matrixDivNum << < (K*K) / 1024 + 1, 1024 >> > (KK, K*K, cS); // Dux/cS
subtractMatrices << <(K*K) / 1024 + 1, 1024 >> > (KK2, I, KK, K*K); //Aq = I - Dux / cS;
Err = 1.0;
t = 1;
bound = (datatype)0.5*lamb / cS;
while (t <= Ts && Err>Es) {
gpu_blas_mmul(handle, KK2, S, KVOXELS2, 0, K, K, K, Globals::colsX); // Aq*S
addMatrices << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS2, KVOXELS, KVOXELS2, K*Globals::colsX); // A = DY+Aq*S
wthresh << <(K*Globals::colsX) / 1024 + 1, 1024 >> > (KVOXELS2, K*Globals::colsX, bound); // A = wthresh(A, 's', 0.5*lam / cS)
if ((err = cudaMemcpy(S, KVOXELS2, K * Globals::colsX * sizeof(datatype), cudaMemcpyDeviceToDevice)) != cudaSuccess) {
printf("cudaMemcpy S failed: %s\n", cudaGetErrorString(err));
cleanup();
exit(-1);
} // S=A
t = t + 1;
}
}
/* Calculates the new Dictionary atoms according to the MM algorithm */
void NewDict() {
int t, Ed, Td;
cudaError_t err;
datatype cD, Err;
if (ED != -1) {
Ed = ED;
Td = 100;
}
else {
Td = TD;
Ed = 0;
}
gpu_blas_mmul(handle, S, S, KK, 2, K, Globals::colsX, K, Globals::colsX); // Sux = S*S'
gpu_blas_mmul(handle, KK, KK, KK2, 1, K, K, K, K); // Sux.'*Sux
cD = findMaxSqrtEigenvalue(cusolverH, KK2, W, d_W, devInfo, K); // cD = max(sqrt(eig(Sux.'*Sux)))
gpu_blas_mmul(handle, X, S, TIMECOMPK, 2, Globals::rowsX, Globals::colsX, K, Globals::colsX); // Y*S'
matrixDivNum << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK, Globals::rowsX*K, cD); // YS = Y*S'/cD
matrixDivNum << <(K*K) / 1024 + 1, 1024 >> > (KK, K*K, cD); // Sux/cD
subtractMatrices << <(K*K) / 1024 + 1, 1024 >> > (KK2, I, KK, K*K); // Bq = I-Sux/cD
Err = 1.0;
t = 1;
while (t <= Td && Err > Ed) {
gpu_blas_mmul(handle, D, KK2, TIMECOMPK2, 0, Globals::rowsX, K, K, K); // D*Bq
addMatrices << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK2, TIMECOMPK, TIMECOMPK2, Globals::rowsX*K); // B = YS + D*Bq
normalizeReduction(TIMECOMPK2, Kv); // Kv = sqrt(sum(B.^2)); Kv(Kv < ccl) = 1;
rdivide << <(Globals::rowsX*K) / 1024 + 1, 1024 >> > (TIMECOMPK2, Globals::rowsX, K, Kv); //B = bsxfun(@rdivide,B,Kv)
if ((err = cudaMemcpy(D, TIMECOMPK2, Globals::rowsX * K * sizeof(datatype), cudaMemcpyDeviceToDevice)) != cudaSuccess) {
printf("cudaMemcpy failed D: %s\n", cudaGetErrorString(err));
cleanup();
exit(-1);
} //D = B
t = t + 1;
}
}
/* Majorized minimization */
void mom() {
int i;
datatype lamb;
lamb = (datatype)LAMBDA*sqrt(frobeniusNorm(handle, X, Globals::rowsX*Globals::colsX) / (Globals::rowsX*Globals::colsX)); //lamb = lamb*sqrt(norm(Y,'fro')/(T*N));
randn(generator, D, Globals::rowsX * K); // D = randn(T,K)
randn(generator, S, K * Globals::colsX); // S = randn(K,N)
dim3 grid(1, 1);
dim3 threads(K, K);
initIdentityGPU << <grid, threads >> > (I, K, K);
printf("Initial error\t: %f\n\n", ErrorComp());
for (i = 0; i < ITER; i++) {
NewCoef(lamb);
NewDict();
printf("Iteration %d\t: %f \n", i + 1, ErrorComp());
}
printf("\nFinal error\t: %f \n", ErrorComp());
}
|
b84d215fa7774a88b8fff08ff9e903bb9d6f7e87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
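        //Up-sweep (reduce) phase of the work-efficient scan: each active thread adds the
        //partial sum that sits 'factor' elements to its left into the tree node it owns.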
__global__ void upSweep(int n, int factorPlusOne, int factor, int addTimes, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < addTimes)
{
int newIndex = (factorPlusOne * (index + 1)) - 1;
if (newIndex < n)
{
idata[newIndex] += idata[newIndex - factor];
//if (newIndex == n - 1)
//{
// idata[newIndex] = 0;
//}
}
}
}//end upSweep function
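        //Down-sweep phase: walks back down the implicit binary tree, swapping the left
        //child with the parent and accumulating, turning the reduction into an exclusive scan.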
__global__ void downSweep(int n, int factorPlusOne, int factor, int addTimes, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < addTimes)
{
int newIndex = (factorPlusOne * (index + 1)) - 1;
if (newIndex < n)
{
int leftChild = idata[newIndex - factor];
idata[newIndex - factor] = idata[newIndex];
idata[newIndex] += leftChild;
}
}
}//end downSweep function
__global__ void resizeArray(int n, int new_n, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < new_n && index >= n)
{
idata[index] = 0;
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
* Notes:
Most of the text in Part 2 applies.
This uses the "Work-Efficient" algorithm from GPU Gems 3, Section 39.2.2.
This can be done in place - it doesn't suffer from the race conditions of the naive method,
since there won't be a case where one thread writes to and another thread reads from the same location in the array.
Beware of errors in Example 39-2. Test non-power-of-two-sized arrays.
Since the work-efficient scan operates on a binary tree structure, it works best with arrays with power-of-two length.
Make sure your implementation works on non-power-of-two sized arrays (see ilog2ceil).
This requires extra memory, so your intermediate array sizes
will need to be rounded to the next power of two.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
//If non-power-of-two sized array, round to next power of two
int new_n = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((new_n + blockSize - 1) / blockSize);
int *inArray;
hipMalloc((void**)&inArray, new_n * sizeof(int));
checkCUDAError("hipMalloc inArray failed!");
//Copy input data to device array and resize if necessary
            hipMemcpy(inArray, idata, sizeof(int) * n, hipMemcpyHostToDevice); //copy only the n valid inputs; resizeArray zero-fills the padded tail
hipLaunchKernelGGL(( resizeArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, new_n, inArray);
bool timerHasStartedElsewhere = false;
try
{
timer().startGpuTimer();
}
catch (std::runtime_error &e)
{
timerHasStartedElsewhere = true;
}
dim3 newNumBlocks = fullBlocksPerGrid;
//Up sweep
for (int d = 0; d <= ilog2ceil(n) - 1; d++)
{
int factorPlusOne = 1 << (d + 1); //2^(d + 1)
int factor = 1 << d; //2^d
int addTimes = 1 << (ilog2ceil(n) - 1 - d);
newNumBlocks = ((new_n / factorPlusOne) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( upSweep), dim3(newNumBlocks), dim3(blockSize), 0, 0, new_n, factorPlusOne, factor, addTimes, inArray);
//Make sure the GPU finishes before the next iteration of the loop
hipDeviceSynchronize();
}
//Down sweep
int lastElem = 0;
hipMemcpy(inArray + (new_n - 1), &lastElem, sizeof(int) * 1, hipMemcpyHostToDevice);
for (int d = ilog2ceil(n) - 1; d >= 0; d--)
{
int factorPlusOne = 1 << (d + 1); //2^(d + 1)
int factor = 1 << d; //2^d
int addTimes = 1 << (ilog2ceil(n) - 1 - d);
newNumBlocks = ((new_n / factor) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( downSweep), dim3(newNumBlocks), dim3(blockSize), 0, 0, new_n, factorPlusOne, factor, addTimes, inArray);
hipDeviceSynchronize();
}
if (!timerHasStartedElsewhere)
{
timer().endGpuTimer();
}
//Transfer to odata
hipMemcpy(odata, inArray, sizeof(int) * (new_n), hipMemcpyDeviceToHost);
//Free the arrays
hipFree(inArray);
}//end scan function
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// TODO
int new_n = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((new_n + blockSize - 1) / blockSize);
int *inArray;
int *boolsArray;
hipMalloc((void**)&inArray, new_n * sizeof(int));
checkCUDAError("hipMalloc inArray failed!");
hipMalloc((void**)&boolsArray, new_n * sizeof(int));
checkCUDAError("hipMalloc boolsArray failed!");
int* scan_in = (int *)malloc(sizeof(int) * new_n);
int* scan_out = (int *)malloc(sizeof(int) * new_n);
int *scatter_in;
int *scatter_out;
hipMalloc((void**)&scatter_in, new_n * sizeof(int));
checkCUDAError("hipMalloc scatter_in failed!");
hipMalloc((void**)&scatter_out, new_n * sizeof(int));
checkCUDAError("hipMalloc scatter_out failed!");
hipDeviceSynchronize();
//Copy input data to device array
            hipMemcpy(inArray, idata, sizeof(int) * n, hipMemcpyHostToDevice); //copy only the n valid inputs; resizeArray zero-fills the padded tail
hipLaunchKernelGGL(( resizeArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, new_n, inArray);
timer().startGpuTimer();
//Call kernMapToBoolean to map values to bool array
hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, new_n, boolsArray, inArray);
//Copy back to host array, find how many fulfilled condition, and run exclusive scan
hipMemcpy(scan_in, boolsArray, sizeof(int) * new_n, hipMemcpyDeviceToHost);
int numPassedElements = 0;
for (int i = 0; i < new_n; i++)
{
if (scan_in[i] == 1)
{
numPassedElements++;
}
}
scan(new_n, scan_out, scan_in);
//Copy output of CPU scan to scatter device array
hipMemcpy(scatter_in, scan_out, sizeof(int) * new_n, hipMemcpyHostToDevice);
//Call kernScatter with scanned boolsArray
hipLaunchKernelGGL(( Common::kernScatter), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, new_n, scatter_out, inArray, boolsArray, scatter_in);
timer().endGpuTimer();
            //scatter_out holds only numPassedElements valid entries, so copy just those back to the host
hipMemcpy(odata, scatter_out, sizeof(int) * numPassedElements, hipMemcpyDeviceToHost);
//Free the arrays
free(scan_in);
free(scan_out);
hipFree(inArray);
hipFree(boolsArray);
hipFree(scatter_in);
hipFree(scatter_out);
checkCUDAError("hipFree failed!");
return numPassedElements;
}//end compact function
}//end namespace Efficient
}//end namespace StreamCompaction
|
b84d215fa7774a88b8fff08ff9e903bb9d6f7e87.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
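        //Up-sweep (reduce) phase of the work-efficient scan: each active thread adds the
        //partial sum that sits 'factor' elements to its left into the tree node it owns.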
__global__ void upSweep(int n, int factorPlusOne, int factor, int addTimes, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < addTimes)
{
int newIndex = (factorPlusOne * (index + 1)) - 1;
if (newIndex < n)
{
idata[newIndex] += idata[newIndex - factor];
//if (newIndex == n - 1)
//{
// idata[newIndex] = 0;
//}
}
}
}//end upSweep function
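        //Down-sweep phase: walks back down the implicit binary tree, swapping the left
        //child with the parent and accumulating, turning the reduction into an exclusive scan.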
__global__ void downSweep(int n, int factorPlusOne, int factor, int addTimes, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < addTimes)
{
int newIndex = (factorPlusOne * (index + 1)) - 1;
if (newIndex < n)
{
int leftChild = idata[newIndex - factor];
idata[newIndex - factor] = idata[newIndex];
idata[newIndex] += leftChild;
}
}
}//end downSweep function
__global__ void resizeArray(int n, int new_n, int *idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < new_n && index >= n)
{
idata[index] = 0;
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
* Notes:
Most of the text in Part 2 applies.
This uses the "Work-Efficient" algorithm from GPU Gems 3, Section 39.2.2.
This can be done in place - it doesn't suffer from the race conditions of the naive method,
since there won't be a case where one thread writes to and another thread reads from the same location in the array.
Beware of errors in Example 39-2. Test non-power-of-two-sized arrays.
Since the work-efficient scan operates on a binary tree structure, it works best with arrays with power-of-two length.
Make sure your implementation works on non-power-of-two sized arrays (see ilog2ceil).
This requires extra memory, so your intermediate array sizes
will need to be rounded to the next power of two.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
//If non-power-of-two sized array, round to next power of two
int new_n = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((new_n + blockSize - 1) / blockSize);
int *inArray;
cudaMalloc((void**)&inArray, new_n * sizeof(int));
checkCUDAError("cudaMalloc inArray failed!");
//Copy input data to device array and resize if necessary
            cudaMemcpy(inArray, idata, sizeof(int) * n, cudaMemcpyHostToDevice); //copy only the n valid inputs; resizeArray zero-fills the padded tail
resizeArray<<<fullBlocksPerGrid, blockSize>>>(n, new_n, inArray);
bool timerHasStartedElsewhere = false;
try
{
timer().startGpuTimer();
}
catch (std::runtime_error &e)
{
timerHasStartedElsewhere = true;
}
dim3 newNumBlocks = fullBlocksPerGrid;
//Up sweep
for (int d = 0; d <= ilog2ceil(n) - 1; d++)
{
int factorPlusOne = 1 << (d + 1); //2^(d + 1)
int factor = 1 << d; //2^d
int addTimes = 1 << (ilog2ceil(n) - 1 - d);
newNumBlocks = ((new_n / factorPlusOne) + blockSize - 1) / blockSize;
upSweep<<<newNumBlocks, blockSize>>>(new_n, factorPlusOne, factor, addTimes, inArray);
//Make sure the GPU finishes before the next iteration of the loop
cudaThreadSynchronize();
}
//Down sweep
int lastElem = 0;
cudaMemcpy(inArray + (new_n - 1), &lastElem, sizeof(int) * 1, cudaMemcpyHostToDevice);
for (int d = ilog2ceil(n) - 1; d >= 0; d--)
{
int factorPlusOne = 1 << (d + 1); //2^(d + 1)
int factor = 1 << d; //2^d
int addTimes = 1 << (ilog2ceil(n) - 1 - d);
newNumBlocks = ((new_n / factor) + blockSize - 1) / blockSize;
downSweep<<<newNumBlocks, blockSize>>>(new_n, factorPlusOne, factor, addTimes, inArray);
cudaThreadSynchronize();
}
if (!timerHasStartedElsewhere)
{
timer().endGpuTimer();
}
//Transfer to odata
cudaMemcpy(odata, inArray, sizeof(int) * (new_n), cudaMemcpyDeviceToHost);
//Free the arrays
cudaFree(inArray);
}//end scan function
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// TODO
int new_n = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((new_n + blockSize - 1) / blockSize);
int *inArray;
int *boolsArray;
cudaMalloc((void**)&inArray, new_n * sizeof(int));
checkCUDAError("cudaMalloc inArray failed!");
cudaMalloc((void**)&boolsArray, new_n * sizeof(int));
checkCUDAError("cudaMalloc boolsArray failed!");
int* scan_in = (int *)malloc(sizeof(int) * new_n);
int* scan_out = (int *)malloc(sizeof(int) * new_n);
int *scatter_in;
int *scatter_out;
cudaMalloc((void**)&scatter_in, new_n * sizeof(int));
checkCUDAError("cudaMalloc scatter_in failed!");
cudaMalloc((void**)&scatter_out, new_n * sizeof(int));
checkCUDAError("cudaMalloc scatter_out failed!");
cudaThreadSynchronize();
//Copy input data to device array
            cudaMemcpy(inArray, idata, sizeof(int) * n, cudaMemcpyHostToDevice); //copy only the n valid inputs; resizeArray zero-fills the padded tail
resizeArray<<<fullBlocksPerGrid, blockSize>>>(n, new_n, inArray);
timer().startGpuTimer();
//Call kernMapToBoolean to map values to bool array
Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(new_n, boolsArray, inArray);
//Copy back to host array, find how many fulfilled condition, and run exclusive scan
cudaMemcpy(scan_in, boolsArray, sizeof(int) * new_n, cudaMemcpyDeviceToHost);
int numPassedElements = 0;
for (int i = 0; i < new_n; i++)
{
if (scan_in[i] == 1)
{
numPassedElements++;
}
}
scan(new_n, scan_out, scan_in);
//Copy output of CPU scan to scatter device array
cudaMemcpy(scatter_in, scan_out, sizeof(int) * new_n, cudaMemcpyHostToDevice);
//Call kernScatter with scanned boolsArray
Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(new_n, scatter_out, inArray, boolsArray, scatter_in);
timer().endGpuTimer();
            //scatter_out holds only numPassedElements valid entries, so copy just those back to the host
cudaMemcpy(odata, scatter_out, sizeof(int) * numPassedElements, cudaMemcpyDeviceToHost);
//Free the arrays
free(scan_in);
free(scan_out);
cudaFree(inArray);
cudaFree(boolsArray);
cudaFree(scatter_in);
cudaFree(scatter_out);
checkCUDAError("cudaFree failed!");
return numPassedElements;
}//end compact function
}//end namespace Efficient
}//end namespace StreamCompaction
|
6823b18b363987cedf63ad272ddaa6b5f9914f76.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
// includes thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
using namespace thrust::placeholders;
////////////////////////////////////////////////////////////////////////////////
// Inline functions
////////////////////////////////////////////////////////////////////////////////
inline __device__ float calculateDistanceSquared(
float x1, float y1, float z1,
float x2, float y2, float z2) {
return (x1 - x2)*(x1 - x2) +
(y1 - y2)*(y1 - y2) +
(z1 - z2)*(z1 - z2);
}
inline float random_float(){
return (float)rand()/(float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
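// Assigns each point the index of its nearest centroid via a brute-force squared-distance search over all k centroids.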
__global__ void
calculateDistances(float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
int *closest, int k, int n)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
unsigned index_min;
float min_distance = 10e9;
for(int i = 0; i < k; i++){
float distance = calculateDistanceSquared(
points_x[tid], points_y[tid], points_z[tid],
centroid_x[i], centroid_y[i], centroid_z[i]);
if(min_distance > distance){
index_min = i;
min_distance = distance;
}
}
closest[tid] = index_min;
}
}
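// Accumulates, per centroid, the coordinate sums and the count of assigned points using atomic adds.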
__global__ void reduce(
float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
int *closest, int *sums, int n) {
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
int centroid_num = closest[tid];
atomicAdd(¢roid_x[centroid_num], points_x[tid]);
atomicAdd(¢roid_y[centroid_num], points_y[tid]);
atomicAdd(¢roid_z[centroid_num], points_z[tid]);
atomicAdd(&sums[centroid_num], 1);
}
}
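// Divides each centroid's accumulated coordinate sums by its point count to produce the new centroid position.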
__global__ void
calculateMean(float *centroid_x, float* centroid_y, float* centroid_z, int *sums, int k)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
int sum = sums[tid];
if(sum != 0){
centroid_x[tid] /= sums[tid];
centroid_y[tid] /= sums[tid];
centroid_z[tid] /= sums[tid];
} else {
centroid_x[tid] = 0.0f;
centroid_y[tid] = 0.0f;
centroid_z[tid] = 0.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Function headers
////////////////////////////////////////////////////////////////////////////////
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n);
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon);
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
srand(0);
//setup parameters
int k = 8, n = 3200000;
float epsilon = 0.0001f;
//initialize host vectors
thrust::host_vector<float> h_points_x(n), h_points_y(n), h_points_z(n);
thrust::host_vector<float> h_centroids_x(k), h_centroids_y(k), h_centroids_z(k);
//generate data
randomize(h_points_x, h_points_y, h_points_z, h_centroids_x, h_centroids_y, h_centroids_z, k, n);
//initialize device vectors, copy data from host vectors
thrust::device_vector<float> d_points_x(h_points_x), d_points_y(h_points_y), d_points_z(h_points_z);
thrust::device_vector<float> d_centroids_x = h_centroids_x, d_centroids_y = h_centroids_y, d_centroids_z = h_centroids_z;
thrust::device_vector<int> d_closest(n);
thrust::device_vector<int> d_sums(k);
//start timers
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// setup execution parameters
dim3 grid(n / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
dim3 grid2(k / 1024 + 1, 1, 1);
dim3 threads2(1024, 1, 1);
int iter = 0;
do {
thrust::fill(d_closest.begin(), d_closest.end(), 0);
//for each point in data set find closest centroid
hipLaunchKernelGGL(( calculateDistances), dim3(grid), dim3(threads) , 0, 0,
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
k, n);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//clear old centroids data
thrust::fill(d_centroids_x.begin(), d_centroids_x.end(), 0.0f);
thrust::fill(d_centroids_y.begin(), d_centroids_y.end(), 0.0f);
thrust::fill(d_centroids_z.begin(), d_centroids_z.end(), 0.0f);
thrust::fill(d_sums.begin(), d_sums.end(), 0);
        //accumulate per-centroid coordinate sums and point counts
hipLaunchKernelGGL(( reduce), dim3(grid), dim3(threads) , 0, 0,
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
thrust::raw_pointer_cast(&d_sums[0]),
n);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
        //divide each centroid's coordinate sums by its point count to obtain the new centroids
hipLaunchKernelGGL(( calculateMean), dim3(grid2), dim3(threads2) , 0, 0,
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_sums[0]), k);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//one iteration done
iter = iter + 1;
} while(
//check if change is small compared to the last iteration
!stop(h_centroids_x, h_centroids_y, h_centroids_z,
d_centroids_x, d_centroids_y, d_centroids_z,
            k, epsilon) && iter < 100);
//stop timers and print summary
sdkStopTimer(&timer);
printf("Processing time: %f (ms), %d iterations\n", sdkGetTimerValue(&timer), iter);
sdkDeleteTimer(&timer);
//write output of the program to a file
write(h_points_x, h_points_y, h_points_z, n, "points.txt");
write(h_centroids_x, h_centroids_y, h_centroids_z, k, "centroids.txt");
printf("Exiting...\n");
exit(EXIT_SUCCESS);
}
//generate data
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n){
for(int i = 0; i < k; i++){
float x = random_float();
float y = random_float();
float z = random_float();
centroid_x[i] = x;
centroid_y[i] = y;
centroid_z[i] = z;
}
for(int i = 0; i < n; i++){
float x = random_float();
float y = random_float();
float z = random_float();
point_x[i] = x;
point_y[i] = y;
point_z[i] = z;
}
}
//check if the algorithm should stop, i.e. if the norm of the centroid update is less
//than the given epsilon
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon){
thrust::host_vector<float>
h_centroid_x_new(d_centroid_x),
h_centroid_y_new(d_centroid_y),
h_centroid_z_new(d_centroid_z);
float norm = 0.0f;
for(int i = 0; i < k; i++){
norm += abs(h_centroid_x_new[i] - h_centroid_x[i]) +
abs(h_centroid_y_new[i] - h_centroid_y[i]) +
abs(h_centroid_z_new[i] - h_centroid_z[i]);
}
norm /= (k * 3);
h_centroid_x = h_centroid_x_new;
h_centroid_y = h_centroid_y_new;
h_centroid_z = h_centroid_z_new;
printf("norm: %f\n", norm);
if(norm > epsilon) return false;
else return true;
}
// writes vectors to a specified file
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename){
std::ofstream myfile;
myfile.open(filename);
for(int i = 0; i < n; i++){
myfile << h_x[i] << " " << h_y[i] << " " << h_z[i] << " " << std::endl;
}
myfile.close();
}
|
6823b18b363987cedf63ad272ddaa6b5f9914f76.cu
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
// includes thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
using namespace thrust::placeholders;
////////////////////////////////////////////////////////////////////////////////
// Inline functions
////////////////////////////////////////////////////////////////////////////////
inline __device__ float calculateDistanceSquared(
float x1, float y1, float z1,
float x2, float y2, float z2) {
return (x1 - x2)*(x1 - x2) +
(y1 - y2)*(y1 - y2) +
(z1 - z2)*(z1 - z2);
}
inline float random_float(){
return (float)rand()/(float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
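// Assigns each point the index of its nearest centroid via a brute-force squared-distance search over all k centroids.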
__global__ void
calculateDistances(float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
int *closest, int k, int n)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
unsigned index_min;
float min_distance = 10e9;
for(int i = 0; i < k; i++){
float distance = calculateDistanceSquared(
points_x[tid], points_y[tid], points_z[tid],
centroid_x[i], centroid_y[i], centroid_z[i]);
if(min_distance > distance){
index_min = i;
min_distance = distance;
}
}
closest[tid] = index_min;
}
}
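// Accumulates, per centroid, the coordinate sums and the count of assigned points using atomic adds.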
__global__ void reduce(
float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
int *closest, int *sums, int n) {
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
int centroid_num = closest[tid];
atomicAdd(¢roid_x[centroid_num], points_x[tid]);
atomicAdd(¢roid_y[centroid_num], points_y[tid]);
atomicAdd(¢roid_z[centroid_num], points_z[tid]);
atomicAdd(&sums[centroid_num], 1);
}
}
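// Divides each centroid's accumulated coordinate sums by its point count to produce the new centroid position.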
__global__ void
calculateMean(float *centroid_x, float* centroid_y, float* centroid_z, int *sums, int k)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
int sum = sums[tid];
if(sum != 0){
centroid_x[tid] /= sums[tid];
centroid_y[tid] /= sums[tid];
centroid_z[tid] /= sums[tid];
} else {
centroid_x[tid] = 0.0f;
centroid_y[tid] = 0.0f;
centroid_z[tid] = 0.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Function headers
////////////////////////////////////////////////////////////////////////////////
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n);
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon);
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
srand(0);
//setup parameters
int k = 8, n = 3200000;
float epsilon = 0.0001f;
//initialize host vectors
thrust::host_vector<float> h_points_x(n), h_points_y(n), h_points_z(n);
thrust::host_vector<float> h_centroids_x(k), h_centroids_y(k), h_centroids_z(k);
//generate data
randomize(h_points_x, h_points_y, h_points_z, h_centroids_x, h_centroids_y, h_centroids_z, k, n);
//initialize device vectors, copy data from host vectors
thrust::device_vector<float> d_points_x(h_points_x), d_points_y(h_points_y), d_points_z(h_points_z);
thrust::device_vector<float> d_centroids_x = h_centroids_x, d_centroids_y = h_centroids_y, d_centroids_z = h_centroids_z;
thrust::device_vector<int> d_closest(n);
thrust::device_vector<int> d_sums(k);
//start timers
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// setup execution parameters
dim3 grid(n / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
dim3 grid2(k / 1024 + 1, 1, 1);
dim3 threads2(1024, 1, 1);
int iter = 0;
do {
thrust::fill(d_closest.begin(), d_closest.end(), 0);
//for each point in data set find closest centroid
calculateDistances<<< grid, threads >>>(
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
k, n);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//clear old centroids data
thrust::fill(d_centroids_x.begin(), d_centroids_x.end(), 0.0f);
thrust::fill(d_centroids_y.begin(), d_centroids_y.end(), 0.0f);
thrust::fill(d_centroids_z.begin(), d_centroids_z.end(), 0.0f);
thrust::fill(d_sums.begin(), d_sums.end(), 0);
        //accumulate per-centroid coordinate sums and point counts
reduce<<< grid, threads >>>(
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
thrust::raw_pointer_cast(&d_sums[0]),
n);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
        //divide each centroid's coordinate sums by its point count to obtain the new centroids
calculateMean<<< grid2, threads2 >>>(
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_sums[0]), k);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//one iteration done
iter = iter + 1;
} while(
//check if change is small compared to the last iteration
!stop(h_centroids_x, h_centroids_y, h_centroids_z,
d_centroids_x, d_centroids_y, d_centroids_z,
            k, epsilon) && iter < 100);
//stop timers and print summary
sdkStopTimer(&timer);
printf("Processing time: %f (ms), %d iterations\n", sdkGetTimerValue(&timer), iter);
sdkDeleteTimer(&timer);
//write output of the program to a file
write(h_points_x, h_points_y, h_points_z, n, "points.txt");
write(h_centroids_x, h_centroids_y, h_centroids_z, k, "centroids.txt");
printf("Exiting...\n");
exit(EXIT_SUCCESS);
}
//generate data
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n){
for(int i = 0; i < k; i++){
float x = random_float();
float y = random_float();
float z = random_float();
centroid_x[i] = x;
centroid_y[i] = y;
centroid_z[i] = z;
}
for(int i = 0; i < n; i++){
float x = random_float();
float y = random_float();
float z = random_float();
point_x[i] = x;
point_y[i] = y;
point_z[i] = z;
}
}
//check if the algorithm should stop, i.e. if the norm of the centroid update is less
//than the given epsilon
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon){
thrust::host_vector<float>
h_centroid_x_new(d_centroid_x),
h_centroid_y_new(d_centroid_y),
h_centroid_z_new(d_centroid_z);
float norm = 0.0f;
for(int i = 0; i < k; i++){
norm += abs(h_centroid_x_new[i] - h_centroid_x[i]) +
abs(h_centroid_y_new[i] - h_centroid_y[i]) +
abs(h_centroid_z_new[i] - h_centroid_z[i]);
}
norm /= (k * 3);
h_centroid_x = h_centroid_x_new;
h_centroid_y = h_centroid_y_new;
h_centroid_z = h_centroid_z_new;
printf("norm: %f\n", norm);
if(norm > epsilon) return false;
else return true;
}
// writes vectors to a specified file
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename){
std::ofstream myfile;
myfile.open(filename);
for(int i = 0; i < n; i++){
myfile << h_x[i] << " " << h_y[i] << " " << h_z[i] << " " << std::endl;
}
myfile.close();
}
|
9bb8656d1042c58cbe83fb25be905148c52f1598.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[64,64,64] --gridDim=[64,64] --no-inline
#include <hip/hip_runtime.h>
__global__ void foo() {
int d = 1;
while(
__invariant(__implies(__enabled(), d == 1 | d == 2 | d == 4 | d == 8 | d == 16 | d == 32 | d == 64)),
__invariant(d > 0),
d < 64) {
d <<= 1;
}
}
|
9bb8656d1042c58cbe83fb25be905148c52f1598.cu
|
//pass
//--blockDim=[64,64,64] --gridDim=[64,64] --no-inline
#include <cuda.h>
__global__ void foo() {
int d = 1;
while(
__invariant(__implies(__enabled(), d == 1 | d == 2 | d == 4 | d == 8 | d == 16 | d == 32 | d == 64)),
__invariant(d > 0),
d < 64) {
d <<= 1;
}
}
|
5f9c7bd9644535d5a4f4bdb89702619ad736adb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <grid.hpp>
#include <grid.cuh>
#include <assertions.hpp>
#include <definitions.cuh>
template <typename T>
int test_gridpoints(const int nx, const int ny, const T h) {
printf("Testing grid points generation with nx = %d ny = %d and h = %f \n", nx, ny, h);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
grid_x(x, nx, ny, h);
approx(x[0], 0.0);
approx(x[1], h);
approx(x[nx-1], h * (nx - 1));
approx(x[(ny - 1) * nx - 1], h * (nx - 1));
approx(x[(ny - 1) * nx], 0.0);
grid_y(x, nx, ny, h);
approx(x[0], 0.0);
approx(x[1], 0.0);
approx(x[nx-1], 0.0);
approx(x[nx], h);
approx(x[nx * (ny - 1)], (ny - 1) * h);
free(x);
return test_report();
}
template <typename T>
int test_gridnorm(const int nx, const int ny) {
T hx = 1.0 / (nx - 1);
T hy = 1.0 / (ny - 1);
printf(
"Testing L1 and L2 norm on grid with nx = %d ny = %d and hx = %f hy = %f "
"\n",
nx, ny, hx, hy);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
for (int i = 0; i < nx * ny; ++i)
x[i] = 1.0;
T norm = grid_l1norm(x, nx, ny, hx, hy, 0, 0, 0, 0);
approx(norm, nx * ny * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 1, 0, 0, 0);
approx(norm, (nx - 1) * ny * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 0, 1, 0, 0);
approx(norm, nx * (ny - 1) * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 0, 0, 1, 1);
approx(norm, (nx - 1) * (ny - 1) * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 0, 0, 0, 0);
approx(norm, nx * ny * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 1, 0, 0, 0);
approx(norm, (nx - 1) * ny * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 0, 1, 0, 0);
approx(norm, nx * (ny - 1) * hx * hy);
// Extract the second last point in the corner
grid_x(x, nx, ny, hx);
norm = grid_l2norm(x, nx, ny, hx, hy, nx - 2, ny - 2, 1, 1);
approx(norm, (1 - hx) * (1 - hx) * hx * hy);
return test_report();
}
template <typename T>
int cuda_test_gridnorm(const int nx, const int ny) {
T hx = 1.0 / (nx - 1);
T hy = 1.0 / (ny - 1);
printf(
"CUDA Testing L1 and L2 norm on grid with nx = %d ny = %d and hx = %f hy = %f "
"\n",
nx, ny, hx, hy);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
for (int i = 0; i < nx * ny; ++i)
x[i] = 1.0;
T *d_x;
CUDACHECK(hipMalloc((void**)&d_x, num_bytes));
CUDACHECK(hipMemcpy(d_x, x, num_bytes, hipMemcpyHostToDevice));
CUDANorm<L1NORM, T> l1norm;
T norm = l1norm(d_x, nx, ny, hx, hy);
approx(norm, nx * ny * hx * hy);
CUDANorm<L2NORM, T> l2norm;
norm = l2norm(d_x, nx, ny, hx, hy);
approx(norm, nx * ny * hx * hy);
CUDACHECK(hipFree(d_x));
return test_report();
}
template <typename T>
int cuda_test_gridpoints(const int nx, const int ny, const T h) {
printf("CUDA Testing grid points generation with nx = %d ny = %d and h = %f \n", nx, ny, h);
size_t num_bytes = nx * ny * sizeof(T);
T *d_x, *x;
hipMalloc((void**)&d_x, num_bytes);
x = (T*)malloc(num_bytes);
cuda_grid_x(d_x, nx, ny, h);
CUDACHECK(hipMemcpy(x, d_x, num_bytes, hipMemcpyDeviceToHost));
approx(x[0], 0.0);
approx(x[1], h);
approx(x[nx-1], h * (nx - 1));
approx(x[(ny - 1) * nx - 1], h * (nx - 1));
approx(x[(ny - 1) * nx], 0.0);
cuda_grid_y(d_x, nx, ny, h);
CUDACHECK(hipMemcpy(x, d_x, num_bytes, hipMemcpyDeviceToHost));
approx(x[0], 0.0);
approx(x[1], 0.0);
approx(x[nx-1], 0.0);
approx(x[nx], h);
approx(x[nx * (ny - 1)], (ny - 1) * h);
free(x);
CUDACHECK(hipFree(d_x));
return test_report();
}
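// Restricts the fine-grid field xf onto the coarse grid and checks that it reproduces xc
// (exact for the linear grid coordinates), using interior L1/L2 norms of the difference.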
template <typename T>
void restriction(const char *axis, const T *xc, T *yc, T *zc, const int nxc,
const int nyc, const T hc, const T *xf, const int nxf, const int nyf, const T hf) {
grid_restrict(yc, nxc, nyc, xf, nxf, nyf);
grid_subtract(zc, xc, yc, nxc, nyc);
T l1_err = grid_l1norm(zc, nxc, nyc, hc, hc, 1, 1, 1, 1);
T l2_err = grid_l2norm(zc, nxc, nyc, hc, hc, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("Restriction in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void prolongation(const char *axis, const T *xf, T *yf, T *zf, const int nxf,
const int nyf, const T hf, const T *xc, const int nxc, const int nyc, const T hc) {
grid_prolongate(yf, nxf, nyf, xc, nxc, nyc);
grid_subtract(zf, xf, yf, nxf, nyf);
T l1_err = grid_l1norm(zf, nxf, nyf, hf, hf);
T l2_err = grid_l2norm(zf, nxf, nyf, hf, hf);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("Prolongation in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void restriction_prolongation_info(const int nxf, const int nyf, const T hf, const int nxc,
const int nyc, const T hc) {
printf(
"Testing grid restriction and prolongation with fine grid "
"[%d %d], hf=%g, and coarse grid [%d %d], hc=%g. \n",
nxf, nyf, hf, nxc, nyc, hc);
}
template <typename T>
int test_restriction_prolongation(const int nxc, const int nyc, const T hc) {
T hf = 0.5 * hc;
int nxf = 2 * (nxc - 1) + 1;
int nyf = 2 * (nyc - 1) + 1;
restriction_prolongation_info(nxf, nyf, hf, nxc, nyc, hc);
size_t num_bytesf = sizeof(T) * nxf * nyf;
size_t num_bytesc = sizeof(T) * nxc * nyc;
T *xf = (T*)malloc(num_bytesf);
T *yf = (T*)malloc(num_bytesf);
T *zf = (T*)malloc(num_bytesf);
T *xc = (T*)malloc(num_bytesc);
T *yc = (T*)malloc(num_bytesc);
T *zc = (T*)malloc(num_bytesc);
// Test restriction and prolongation in the x-direction
grid_x(xf, nxf, nyf, hf);
grid_x(xc, nxc, nyc, hc);
restriction("x", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
prolongation("x", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
// Test restriction and prolongation in the y-direction
grid_y(xf, nxf, nyf, hf);
grid_y(xc, nxc, nyc, hc);
restriction("y", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
prolongation("y", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
free(xf);
free(yf);
free(zf);
free(xc);
free(yc);
free(zc);
return test_report();
}
template <typename T>
void cuda_restriction(const char *axis, const T *xc, T *yc, T *zc, const int nxc,
const int nyc, const T hc, const T *xf, const int nxf, const int nyf, const T hf) {
size_t num_bytes = sizeof(T) * nxc * nyc;
cuda_grid_restrict(yc, nxc, nyc, xf, nxf, nyf);
cuda_grid_subtract(zc, xc, yc, nxc, nyc);
T *hzc = (T*)malloc(num_bytes);
CUDACHECK(hipMemcpy(hzc, zc, num_bytes, hipMemcpyDeviceToHost));
//TODO: Compute norms on device once there's support for bounds control
T l1_err = grid_l1norm(hzc, nxc, nyc, hc, hc, 1, 1, 1, 1);
T l2_err = grid_l2norm(hzc, nxc, nyc, hc, hc, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("CUDA Restriction in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void cuda_prolongation(const char *axis, const T *xf, T *yf, T *zf, const int nxf,
const int nyf, const T hf, const T *xc, const int nxc, const int nyc, const T hc) {
size_t num_bytes = sizeof(T) * nxf * nyf;
cuda_grid_prolongate(yf, nxf, nyf, xc, nxc, nyc);
cuda_grid_subtract(zf, xf, yf, nxf, nyf);
T *hzf = (T*)malloc(num_bytes);
CUDACHECK(hipMemcpy(hzf, zf, num_bytes, hipMemcpyDeviceToHost));
//TODO: Compute norms on device once there's support for bounds control
T l1_err = grid_l1norm(hzf, nxf, nyf, hf, hf, 1, 1, 1, 1);
T l2_err = grid_l2norm(hzf, nxf, nyf, hf, hf, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("CUDA Prolongation in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
int cuda_test_restriction_prolongation(const int nxc, const int nyc, const T hc) {
T hf = 0.5 * hc;
int nxf = 2 * (nxc - 1) + 1;
int nyf = 2 * (nyc - 1) + 1;
restriction_prolongation_info(nxf, nyf, hf, nxc, nyc, hc);
size_t num_bytesf = sizeof(T) * nxf * nyf;
size_t num_bytesc = sizeof(T) * nxc * nyc;
T *xf, *yf, *zf, *xc, *yc, *zc;
hipMalloc((void**)&xf, num_bytesf);
hipMalloc((void**)&yf, num_bytesf);
hipMalloc((void**)&zf, num_bytesf);
hipMalloc((void**)&xc, num_bytesc);
hipMalloc((void**)&yc, num_bytesc);
hipMalloc((void**)&zc, num_bytesc);
cuda_grid_x(xf, nxf, nyf, hf);
cuda_grid_x(xc, nxc, nyc, hc);
cuda_restriction("x", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
cuda_prolongation("x", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
    cuda_grid_y(xf, nxf, nyf, hf);
    cuda_grid_y(xc, nxc, nyc, hc);
cuda_restriction("y", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
cuda_prolongation("y", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
CUDACHECK(hipFree(xf));
CUDACHECK(hipFree(yf));
CUDACHECK(hipFree(zf));
CUDACHECK(hipFree(xc));
CUDACHECK(hipFree(yc));
CUDACHECK(hipFree(zc));
return test_report();
}
int main(int argc, char **argv) {
int err = 0;
{
int nx = 20;
int ny = 20;
double h = 1.0;
err |= test_gridpoints(nx, ny, h);
err |= cuda_test_gridpoints(nx, ny, h);
}
{
int nx = 21;
int ny = 20;
double h = 0.5;
err |= test_gridpoints(nx, ny, h);
}
{
int nx = 21;
int ny = 31;
err |= test_gridnorm<double>(nx, ny);
err |= cuda_test_gridnorm<double>(nx, ny);
}
{
int nxc = 4;
int nyc = 4;
double hf = 0.3;
err |= test_restriction_prolongation(nxc, nyc, hf);
err |= cuda_test_restriction_prolongation(nxc, nyc, hf);
}
return err;
}
|
5f9c7bd9644535d5a4f4bdb89702619ad736adb4.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <grid.hpp>
#include <grid.cuh>
#include <assertions.hpp>
#include <definitions.cuh>
template <typename T>
int test_gridpoints(const int nx, const int ny, const T h) {
printf("Testing grid points generation with nx = %d ny = %d and h = %f \n", nx, ny, h);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
grid_x(x, nx, ny, h);
approx(x[0], 0.0);
approx(x[1], h);
approx(x[nx-1], h * (nx - 1));
approx(x[(ny - 1) * nx - 1], h * (nx - 1));
approx(x[(ny - 1) * nx], 0.0);
grid_y(x, nx, ny, h);
approx(x[0], 0.0);
approx(x[1], 0.0);
approx(x[nx-1], 0.0);
approx(x[nx], h);
approx(x[nx * (ny - 1)], (ny - 1) * h);
free(x);
return test_report();
}
template <typename T>
int test_gridnorm(const int nx, const int ny) {
T hx = 1.0 / (nx - 1);
T hy = 1.0 / (ny - 1);
printf(
"Testing L1 and L2 norm on grid with nx = %d ny = %d and hx = %f hy = %f "
"\n",
nx, ny, hx, hy);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
for (int i = 0; i < nx * ny; ++i)
x[i] = 1.0;
T norm = grid_l1norm(x, nx, ny, hx, hy, 0, 0, 0, 0);
approx(norm, nx * ny * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 1, 0, 0, 0);
approx(norm, (nx - 1) * ny * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 0, 1, 0, 0);
approx(norm, nx * (ny - 1) * hx * hy);
norm = grid_l1norm(x, nx, ny, hx, hy, 0, 0, 1, 1);
approx(norm, (nx - 1) * (ny - 1) * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 0, 0, 0, 0);
approx(norm, nx * ny * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 1, 0, 0, 0);
approx(norm, (nx - 1) * ny * hx * hy);
norm = grid_l2norm(x, nx, ny, hx, hy, 0, 1, 0, 0);
approx(norm, nx * (ny - 1) * hx * hy);
// Extract the second last point in the corner
grid_x(x, nx, ny, hx);
norm = grid_l2norm(x, nx, ny, hx, hy, nx - 2, ny - 2, 1, 1);
approx(norm, (1 - hx) * (1 - hx) * hx * hy);
return test_report();
}
template <typename T>
int cuda_test_gridnorm(const int nx, const int ny) {
T hx = 1.0 / (nx - 1);
T hy = 1.0 / (ny - 1);
printf(
"CUDA Testing L1 and L2 norm on grid with nx = %d ny = %d and hx = %f hy = %f "
"\n",
nx, ny, hx, hy);
size_t num_bytes = nx * ny * sizeof(T);
T *x = (T*)malloc(num_bytes);
for (int i = 0; i < nx * ny; ++i)
x[i] = 1.0;
T *d_x;
CUDACHECK(cudaMalloc((void**)&d_x, num_bytes));
CUDACHECK(cudaMemcpy(d_x, x, num_bytes, cudaMemcpyHostToDevice));
CUDANorm<L1NORM, T> l1norm;
T norm = l1norm(d_x, nx, ny, hx, hy);
approx(norm, nx * ny * hx * hy);
CUDANorm<L2NORM, T> l2norm;
norm = l2norm(d_x, nx, ny, hx, hy);
approx(norm, nx * ny * hx * hy);
CUDACHECK(cudaFree(d_x));
return test_report();
}
template <typename T>
int cuda_test_gridpoints(const int nx, const int ny, const T h) {
printf("CUDA Testing grid points generation with nx = %d ny = %d and h = %f \n", nx, ny, h);
size_t num_bytes = nx * ny * sizeof(T);
T *d_x, *x;
cudaMalloc((void**)&d_x, num_bytes);
x = (T*)malloc(num_bytes);
cuda_grid_x(d_x, nx, ny, h);
CUDACHECK(cudaMemcpy(x, d_x, num_bytes, cudaMemcpyDeviceToHost));
approx(x[0], 0.0);
approx(x[1], h);
approx(x[nx-1], h * (nx - 1));
approx(x[(ny - 1) * nx - 1], h * (nx - 1));
approx(x[(ny - 1) * nx], 0.0);
cuda_grid_y(d_x, nx, ny, h);
CUDACHECK(cudaMemcpy(x, d_x, num_bytes, cudaMemcpyDeviceToHost));
approx(x[0], 0.0);
approx(x[1], 0.0);
approx(x[nx-1], 0.0);
approx(x[nx], h);
approx(x[nx * (ny - 1)], (ny - 1) * h);
free(x);
CUDACHECK(cudaFree(d_x));
return test_report();
}
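// Restricts the fine-grid field xf onto the coarse grid and checks that it reproduces xc
// (exact for the linear grid coordinates), using interior L1/L2 norms of the difference.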
template <typename T>
void restriction(const char *axis, const T *xc, T *yc, T *zc, const int nxc,
const int nyc, const T hc, const T *xf, const int nxf, const int nyf, const T hf) {
grid_restrict(yc, nxc, nyc, xf, nxf, nyf);
grid_subtract(zc, xc, yc, nxc, nyc);
T l1_err = grid_l1norm(zc, nxc, nyc, hc, hc, 1, 1, 1, 1);
T l2_err = grid_l2norm(zc, nxc, nyc, hc, hc, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("Restriction in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void prolongation(const char *axis, const T *xf, T *yf, T *zf, const int nxf,
const int nyf, const T hf, const T *xc, const int nxc, const int nyc, const T hc) {
grid_prolongate(yf, nxf, nyf, xc, nxc, nyc);
grid_subtract(zf, xf, yf, nxf, nyf);
T l1_err = grid_l1norm(zf, nxf, nyf, hf, hf);
T l2_err = grid_l2norm(zf, nxf, nyf, hf, hf);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("Prolongation in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void restriction_prolongation_info(const int nxf, const int nyf, const T hf, const int nxc,
const int nyc, const T hc) {
printf(
"Testing grid restriction and prolongation with fine grid "
"[%d %d], hf=%g, and coarse grid [%d %d], hc=%g. \n",
nxf, nyf, hf, nxc, nyc, hc);
}
template <typename T>
int test_restriction_prolongation(const int nxc, const int nyc, const T hc) {
T hf = 0.5 * hc;
int nxf = 2 * (nxc - 1) + 1;
int nyf = 2 * (nyc - 1) + 1;
restriction_prolongation_info(nxf, nyf, hf, nxc, nyc, hc);
size_t num_bytesf = sizeof(T) * nxf * nyf;
size_t num_bytesc = sizeof(T) * nxc * nyc;
T *xf = (T*)malloc(num_bytesf);
T *yf = (T*)malloc(num_bytesf);
T *zf = (T*)malloc(num_bytesf);
T *xc = (T*)malloc(num_bytesc);
T *yc = (T*)malloc(num_bytesc);
T *zc = (T*)malloc(num_bytesc);
// Test restriction and prolongation in the x-direction
grid_x(xf, nxf, nyf, hf);
grid_x(xc, nxc, nyc, hc);
restriction("x", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
prolongation("x", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
// Test restriction and prolongation in the y-direction
grid_y(xf, nxf, nyf, hf);
grid_y(xc, nxc, nyc, hc);
restriction("y", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
prolongation("y", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
free(xf);
free(yf);
free(zf);
free(xc);
free(yc);
free(zc);
return test_report();
}
template <typename T>
void cuda_restriction(const char *axis, const T *xc, T *yc, T *zc, const int nxc,
const int nyc, const T hc, const T *xf, const int nxf, const int nyf, const T hf) {
size_t num_bytes = sizeof(T) * nxc * nyc;
cuda_grid_restrict(yc, nxc, nyc, xf, nxf, nyf);
cuda_grid_subtract(zc, xc, yc, nxc, nyc);
T *hzc = (T*)malloc(num_bytes);
CUDACHECK(cudaMemcpy(hzc, zc, num_bytes, cudaMemcpyDeviceToHost));
//TODO: Compute norms on device once there's support for bounds control
T l1_err = grid_l1norm(hzc, nxc, nyc, hc, hc, 1, 1, 1, 1);
T l2_err = grid_l2norm(hzc, nxc, nyc, hc, hc, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("CUDA Restriction in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
void cuda_prolongation(const char *axis, const T *xf, T *yf, T *zf, const int nxf,
const int nyf, const T hf, const T *xc, const int nxc, const int nyc, const T hc) {
size_t num_bytes = sizeof(T) * nxf * nyf;
cuda_grid_prolongate(yf, nxf, nyf, xc, nxc, nyc);
cuda_grid_subtract(zf, xf, yf, nxf, nyf);
T *hzf = (T*)malloc(num_bytes);
CUDACHECK(cudaMemcpy(hzf, zf, num_bytes, cudaMemcpyDeviceToHost));
//TODO: Compute norms on device once there's support for bounds control
T l1_err = grid_l1norm(hzf, nxf, nyf, hf, hf, 1, 1, 1, 1);
T l2_err = grid_l2norm(hzf, nxf, nyf, hf, hf, 1, 1, 1, 1);
approx(l1_err, 0.0);
approx(l2_err, 0.0);
printf("CUDA Prolongation in %s l1-error: %g, l2-error: %g \n", axis, l1_err, l2_err);
}
template <typename T>
int cuda_test_restriction_prolongation(const int nxc, const int nyc, const T hc) {
T hf = 0.5 * hc;
int nxf = 2 * (nxc - 1) + 1;
int nyf = 2 * (nyc - 1) + 1;
restriction_prolongation_info(nxf, nyf, hf, nxc, nyc, hc);
size_t num_bytesf = sizeof(T) * nxf * nyf;
size_t num_bytesc = sizeof(T) * nxc * nyc;
T *xf, *yf, *zf, *xc, *yc, *zc;
cudaMalloc((void**)&xf, num_bytesf);
cudaMalloc((void**)&yf, num_bytesf);
cudaMalloc((void**)&zf, num_bytesf);
cudaMalloc((void**)&xc, num_bytesc);
cudaMalloc((void**)&yc, num_bytesc);
cudaMalloc((void**)&zc, num_bytesc);
cuda_grid_x(xf, nxf, nyf, hf);
cuda_grid_x(xc, nxc, nyc, hc);
cuda_restriction("x", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
cuda_prolongation("x", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
    cuda_grid_y(xf, nxf, nyf, hf);
    cuda_grid_y(xc, nxc, nyc, hc);
cuda_restriction("y", xc, yc, zc, nxc, nyc, hc, xf, nxf, nyf, hf);
cuda_prolongation("y", xf, yf, zf, nxf, nyf, hf, xc, nxc, nyc, hc);
CUDACHECK(cudaFree(xf));
CUDACHECK(cudaFree(yf));
CUDACHECK(cudaFree(zf));
CUDACHECK(cudaFree(xc));
CUDACHECK(cudaFree(yc));
CUDACHECK(cudaFree(zc));
return test_report();
}
int main(int argc, char **argv) {
int err = 0;
{
int nx = 20;
int ny = 20;
double h = 1.0;
err |= test_gridpoints(nx, ny, h);
err |= cuda_test_gridpoints(nx, ny, h);
}
{
int nx = 21;
int ny = 20;
double h = 0.5;
err |= test_gridpoints(nx, ny, h);
}
{
int nx = 21;
int ny = 31;
err |= test_gridnorm<double>(nx, ny);
err |= cuda_test_gridnorm<double>(nx, ny);
}
{
int nxc = 4;
int nyc = 4;
double hf = 0.3;
err |= test_restriction_prolongation(nxc, nyc, hf);
err |= cuda_test_restriction_prolongation(nxc, nyc, hf);
}
return err;
}
|